diff --git a/.github/actions/install-go/action.yml b/.github/actions/install-go/action.yml index 173559284f7..30be7d7f14a 100644 --- a/.github/actions/install-go/action.yml +++ b/.github/actions/install-go/action.yml @@ -1,6 +1,11 @@ name: Install Go description: Install Go for Filecoin Lotus +inputs: + working-directory: + description: Specifies the working directory where the command is run. + required: false + runs: using: composite steps: @@ -10,6 +15,8 @@ runs: cache: false - id: go-mod uses: ipdxco/unified-github-workflows/.github/actions/read-go-mod@main + with: + working-directory: ${{ inputs.working-directory || github.workspace }} - uses: actions/setup-go@v5 with: go-version: ${{ fromJSON(steps.go-mod.outputs.json).Go }}.x diff --git a/.github/actions/start-yugabytedb/action.yml b/.github/actions/start-yugabytedb/action.yml index 13c480c6640..1019d5575f9 100644 --- a/.github/actions/start-yugabytedb/action.yml +++ b/.github/actions/start-yugabytedb/action.yml @@ -4,7 +4,7 @@ description: Install Yugabyte Database for Filecoin Lotus runs: using: composite steps: - - run: docker run --rm --name yugabyte -d -p 5433:5433 yugabytedb/yugabyte:2.18.0.0-b65 bin/yugabyted start --daemon=false + - run: docker run --rm --name yugabyte -d -p 5433:5433 yugabytedb/yugabyte:2.21.0.1-b1 bin/yugabyted start --daemon=false shell: bash - run: | while true; do diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b5843c5b38e..6422093501b 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -15,7 +15,7 @@ Before you mark the PR ready for review, please make sure that: - [ ] PR title is in the form of of `: : ` - example: ` fix: mempool: Introduce a cache for valid signatures` - `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test - - `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps + - `area`, e.g. 
api, chain, state, mempool, multisig, networking, paych, proving, sealing, wallet, deps - [ ] If the PR affects users (e.g., new feature, bug fix, system requirements change), update the CHANGELOG.md and add details to the UNRELEASED section. - [ ] New features have usage guidelines and / or documentation updates in - [ ] [Lotus Documentation](https://lotus.filecoin.io) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d7dd59e143e..543b17dd8cd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -16,7 +16,8 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} -permissions: {} +permissions: + contents: read jobs: build: diff --git a/.github/workflows/builtin-actor-tests.yml b/.github/workflows/builtin-actor-tests.yml index 93d4c669e59..c24d8db1f9c 100644 --- a/.github/workflows/builtin-actor-tests.yml +++ b/.github/workflows/builtin-actor-tests.yml @@ -8,7 +8,8 @@ on: branches: - release/* -permissions: {} +permissions: + contents: read jobs: release: diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 2f60cdde77a..6c848221994 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -16,7 +16,8 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} -permissions: {} +permissions: + contents: read jobs: check-docsgen: @@ -43,7 +44,6 @@ jobs: - uses: ./.github/actions/install-go - run: make deps lotus - run: go install golang.org/x/tools/cmd/goimports - - run: go install github.com/hannahhoward/cbor-gen-for - run: make gen - run: git diff --exit-code - run: make docsgen-cli @@ -57,7 +57,7 @@ jobs: submodules: 'recursive' - uses: ./.github/actions/install-system-dependencies - uses: ./.github/actions/install-go - - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.58.2 + - run: go install 
github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.0 - run: make deps - run: golangci-lint run -v --timeout 10m --concurrency 4 check-fmt: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f6afe68bf01..cfde8bb42e1 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -10,16 +10,21 @@ on: schedule: - cron: '0 0 * * *' workflow_dispatch: + inputs: + ref: + description: The GitHub ref (e.g. refs/tags/v1.0.0) to release + required: false defaults: run: shell: bash -permissions: {} +permissions: + contents: read jobs: docker: - name: Docker (${{ matrix.image }} / ${{ matrix.network }}) [publish=${{ github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') }}] + name: Docker (${{ matrix.image }} / ${{ matrix.network }}) [publish=${{ (inputs.ref || github.ref) == 'refs/heads/master' || startsWith(inputs.ref || github.ref, 'refs/tags/') }}] runs-on: ubuntu-latest strategy: fail-fast: false @@ -35,13 +40,13 @@ jobs: - image: lotus network: mainnet env: - PUBLISH: ${{ github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') }} + PUBLISH: ${{ github.ref == 'refs/heads/master' || startsWith(inputs.ref || github.ref, 'refs/tags/') }} steps: - id: channel env: - IS_MASTER: ${{ github.ref == 'refs/heads/master' }} - IS_TAG: ${{ startsWith(github.ref, 'refs/tags/') }} - IS_RC: ${{ endsWith(github.ref, '-rc') }} + IS_MASTER: ${{ (inputs.ref || github.ref) == 'refs/heads/master' }} + IS_TAG: ${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }} + IS_RC: ${{ contains(inputs.ref || github.ref, '-rc') }} IS_SCHEDULED: ${{ github.event_name == 'schedule' }} run: | channel='' @@ -58,10 +63,20 @@ jobs: channel=stable fi fi - echo "channel=$channel" | tee -a $GITHUB_ENV + echo "channel=$channel" | tee -a $GITHUB_OUTPUT - uses: actions/checkout@v4 with: submodules: 'recursive' + ref: ${{ inputs.ref || github.ref }} + - id: git + env: + REF: ${{ inputs.ref || github.ref }} + run: | + 
ref="${REF#refs/heads/}" + ref="${ref#refs/tags/}" + sha="$(git rev-parse --short HEAD)" + echo "ref=$ref" | tee -a "$GITHUB_OUTPUT" + echo "sha=$sha" | tee -a "$GITHUB_OUTPUT" - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Docker meta @@ -70,10 +85,9 @@ jobs: with: images: filecoin/${{ matrix.image }} tags: | - type=schedule - type=raw,enable=${{ github.event_name != 'schedule' && steps.channel.outputs.channel != '' }},value=${{ steps.channel.outputs.channel }} - type=ref,event=tag - type=sha,prefix= + type=raw,enable=${{ steps.channel.outputs.channel != '' }},value=${{ steps.channel.outputs.channel }} + type=raw,enable=${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }},value=${{ steps.git.outputs.ref }} + type=raw,value=${{ steps.git.outputs.sha }} flavor: | latest=false suffix=${{ matrix.network != 'mainnet' && format('-{0}', matrix.network) || '' }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 797473bb330..12ea6e3f90e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,12 +8,17 @@ on: tags: - v* workflow_dispatch: + inputs: + ref: + description: The GitHub ref (e.g. 
refs/tags/v1.0.0) to release + required: false defaults: run: shell: bash -permissions: {} +permissions: + contents: read jobs: build: @@ -41,38 +46,57 @@ jobs: echo "::error title=Unexpected Runner::Expected $OS/$ARCH, got $RUNNER_OS/$RUNNER_ARCH" exit 1 fi + - uses: actions/checkout@v4 + with: + path: actions - uses: actions/checkout@v4 with: submodules: 'recursive' - - uses: ./.github/actions/install-system-dependencies - - uses: ./.github/actions/install-go + ref: ${{ inputs.ref || github.ref }} + path: lotus + - uses: ./actions/.github/actions/install-system-dependencies + - uses: ./actions/.github/actions/install-go + with: + working-directory: lotus - env: GITHUB_TOKEN: ${{ github.token }} run: make deps lotus lotus-miner lotus-worker + working-directory: lotus - if: runner.os == 'macOS' run: otool -hv lotus - - run: ./scripts/version-check.sh ./lotus + working-directory: lotus + - env: + INPUTS_REF: ${{ inputs.ref }} + run: | + export GITHUB_REF=${INPUTS_REF:-$GITHUB_REF} + ../actions/scripts/version-check.sh ./lotus + working-directory: lotus - uses: actions/upload-artifact@v4 with: name: lotus-${{ matrix.os }}-${{ matrix.arch }} path: | - lotus - lotus-miner - lotus-worker + lotus/lotus + lotus/lotus-miner + lotus/lotus-worker release: - name: Release [publish=${{ startsWith(github.ref, 'refs/tags/') }}] + name: Release [publish=${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }}] permissions: # This enables the job to create and/or update GitHub releases contents: write runs-on: ubuntu-latest needs: [build] env: - PUBLISH: ${{ startsWith(github.ref, 'refs/tags/') }} + PUBLISH: ${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }} steps: + - uses: actions/checkout@v4 + with: + path: actions - uses: actions/checkout@v4 with: submodules: 'recursive' fetch-depth: 0 + path: lotus + ref: ${{ inputs.ref || github.ref }} - uses: actions/download-artifact@v4 with: name: lotus-Linux-X64 @@ -85,7 +109,9 @@ jobs: with: name: lotus-macOS-ARM64 path: 
darwin_arm64 - - uses: ./.github/actions/install-go + - uses: ./actions/.github/actions/install-go + with: + working-directory: lotus - uses: ipfs/download-ipfs-distribution-action@v1 with: name: kubo @@ -93,13 +119,23 @@ jobs: - uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0 with: distribution: goreleaser-pro - version: latest - args: release --clean --debug ${{ env.PUBLISH == 'false' && '--snapshot' || '' }} + version: 2.0.1 + args: release --clean ${{ env.PUBLISH == 'false' && '--snapshot' || '' }} + workdir: lotus env: - GITHUB_TOKEN: ${{ env.PUBLISH == 'true' && github.token || '' }} + GITHUB_TOKEN: ${{ env.PUBLISH == 'true' && secrets.GORELEASER_GITUB_TOKEN || github.token || '' }} GORELEASER_KEY: ${{ env.PUBLISH == 'true' && secrets.GORELEASER_KEY || '' }} - - run: ./scripts/generate-checksums.sh + - env: + INPUTS_REF: ${{ inputs.ref }} + run: | + export GITHUB_REF=${INPUTS_REF:-$GITHUB_REF} + ../actions/scripts/generate-checksums.sh + working-directory: lotus - if: env.PUBLISH == 'true' env: GITHUB_TOKEN: ${{ github.token }} - run: ./scripts/publish-checksums.sh + INPUTS_REF: ${{ inputs.ref }} + run: | + export GITHUB_REF=${INPUTS_REF:-$GITHUB_REF} + ../actions/scripts/publish-checksums.sh + working-directory: lotus diff --git a/.github/workflows/sorted-pr-checks.yml b/.github/workflows/sorted-pr-checks.yml index 0c2b0ee8a6f..2ce76cb1761 100644 --- a/.github/workflows/sorted-pr-checks.yml +++ b/.github/workflows/sorted-pr-checks.yml @@ -17,15 +17,17 @@ on: - completed permissions: + actions: read + checks: read pull-requests: write concurrency: - group: ${{ github.workflow }}-${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number || 'unknown' }} + group: ${{ github.workflow }}-${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number }} cancel-in-progress: true jobs: comment: - if: github.event.inputs.pull_number || 
github.event.workflow_run.pull_requests[0] + if: github.event.inputs.pull_number || github.event.workflow_run.event == 'pull_request' uses: ipdxco/sorted-pr-checks/.github/workflows/comment.yml@v1 with: pull_number: ${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 3116da07c74..9def5f67800 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -4,7 +4,8 @@ on: schedule: - cron: '0 12 * * *' -permissions: {} +permissions: + contents: read jobs: stale: diff --git a/.github/workflows/sync-master-main.yaml b/.github/workflows/sync-master-main.yaml index b629b560433..3ffb6932a25 100644 --- a/.github/workflows/sync-master-main.yaml +++ b/.github/workflows/sync-master-main.yaml @@ -5,7 +5,8 @@ on: branches: - master -permissions: {} +permissions: + contents: read jobs: sync: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a64a613a878..2a5648a54a5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,7 +16,8 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} -permissions: {} +permissions: + contents: read jobs: discover: @@ -96,6 +97,7 @@ jobs: "itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"], "itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"], "itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"], + "itest-manual_onboarding": ["self-hosted", "linux", "x64", "xlarge"], "itest-mempool": ["self-hosted", "linux", "x64", "xlarge"], "itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"], "itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"], @@ -115,7 +117,7 @@ jobs: } # A list of test groups that require YugabyteDB to be running yugabytedb: | - ["itest-harmonydb", "itest-harmonytask", "itest-curio"] + ["itest-harmonydb"] # A 
list of test groups that require Proof Parameters to be fetched parameters: | [ @@ -128,6 +130,7 @@ jobs: "itest-deals", "itest-direct_data_onboard_verified", "itest-direct_data_onboard", + "itest-manual_onboarding", "itest-net", "itest-path_detach_redeclare", "itest-path_type_filters", @@ -144,7 +147,8 @@ jobs: "itest-worker", "multicore-sdr", "unit-cli", - "unit-storage" + "unit-storage", + "itest-curio" ] run: | # Create a list of integration test groups @@ -203,7 +207,7 @@ jobs: echo -e "path< v0.3.2) +- github.com/filecoin-project/go-state-types (v0.13.3 -> v0.14.0-dev) + +## Lotus-Miner / Curio related changes + +- fix logs (#12036) ([filecoin-project/lotus#12036](https://github.com/filecoin-project/lotus/pull/12036)) +- feat: curioweb: Improve task_history indexes (#11911) ([filecoin-project/lotus#11911](https://github.com/filecoin-project/lotus/pull/11911)) +- fix: curio taskstorage: Don't try to free reservations by nulled TaskID (#12018) ([filecoin-project/lotus#12018](https://github.com/filecoin-project/lotus/pull/12018)) +- fix actor string (#12019) ([filecoin-project/lotus#12019](https://github.com/filecoin-project/lotus/pull/12019)) +- fix: curio: Update pgx imports, fix db_storage alloc +- feat: curioweb: Show piece info on the sector page (#11955) ([filecoin-project/lotus#11955](https://github.com/filecoin-project/lotus/pull/11955)) +- curio: feat: break trees task into TreeD(prefetch) and TreeRC (#11895) ([filecoin-project/lotus#11895](https://github.com/filecoin-project/lotus/pull/11895)) +- fix: curio: node UI & darwin gpu count (#11950) ([filecoin-project/lotus#11950](https://github.com/filecoin-project/lotus/pull/11950)) +- feat: curio: Keep more sector metadata in the DB long-term (#11933) ([filecoin-project/lotus#11933](https://github.com/filecoin-project/lotus/pull/11933)) +- fix: curio/lmrpc: Check ParkPiece success before creating sectors (#11975) ([filecoin-project/lotus#11975](https://github.com/filecoin-project/lotus/pull/11975)) +- 
feat: curio: docker devnet (#11954) ([filecoin-project/lotus#11954](https://github.com/filecoin-project/lotus/pull/11954)) +- feat: curio: alertManager (#11926) ([filecoin-project/lotus#11926](https://github.com/filecoin-project/lotus/pull/11926)) +- curio cfg edit: ux cleanups (#11985) ([filecoin-project/lotus#11985](https://github.com/filecoin-project/lotus/pull/11985)) +- fix: curio: Drop FKs from pipeline to fix retry loops (#11973) ([filecoin-project/lotus#11973](https://github.com/filecoin-project/lotus/pull/11973)) +- Produce DEB files for amd64 for openCL and cuda (#11885) ([filecoin-project/lotus#11885](https://github.com/filecoin-project/lotus/pull/11885)) +- gui-listen fix (#12013) ([filecoin-project/lotus#12013](https://github.com/filecoin-project/lotus/pull/12013)) +- feat: curio: allow multiple pieces per sector (#11935) ([filecoin-project/lotus#11935](https://github.com/filecoin-project/lotus/pull/11935)) +- chore: update yugabyte deps (#12022) ([filecoin-project/lotus#12022](https://github.com/filecoin-project/lotus/pull/12022)) +- fix: harmonydb: Use timestampz instead of timestamp across the schema (#12030) ([filecoin-project/lotus#12030](https://github.com/filecoin-project/lotus/pull/12030)) +- cleanup: miner: remove markets and deal-making from Lotus Miner (#12005) ([filecoin-project/lotus#12005](https://github.com/filecoin-project/lotus/pull/12005)) +- fix non existing sector (#12012) ([filecoin-project/lotus#12012](https://github.com/filecoin-project/lotus/pull/12012)) +- feat: curio ffiselect: Isolate gpu calls in a subprocess (#11994) ([filecoin-project/lotus#11994](https://github.com/filecoin-project/lotus/pull/11994)) +- feat: curio: jsonrpc in webui (#11904) ([filecoin-project/lotus#11904](https://github.com/filecoin-project/lotus/pull/11904)) +- fix: itests: Fix flaky curio itest (#12037) ([filecoin-project/lotus#12037](https://github.com/filecoin-project/lotus/pull/12037)) +- feat: curio: wdPost and wnPost alerts (#12029) 
([filecoin-project/lotus#12029](https://github.com/filecoin-project/lotus/pull/12029)) +- fix: storage: Fix a race in GenerateWindowPoStAdv (#12064) ([filecoin-project/lotus#12064](https://github.com/filecoin-project/lotus/pull/12064)) +- Remove "provider" relics (#11992) ([filecoin-project/lotus#11992](https://github.com/filecoin-project/lotus/pull/11992)) +- fix sector UI (#12016) ([filecoin-project/lotus#12016](https://github.com/filecoin-project/lotus/pull/12016)) + +## Others +- ci: deprecate circle ci in favour of github actions (#11786) ([filecoin-project/lotus#11786](https://github.com/filecoin-project/lotus/pull/11786)) +- src: chain: remove C dependency from builtin types (#12015) ([filecoin-project/lotus#12015](https://github.com/filecoin-project/lotus/pull/12015)) +- chore: fix function names (#12043) ([filecoin-project/lotus#12043](https://github.com/filecoin-project/lotus/pull/12043)) +- chore: bump build version in master (#11946) ([filecoin-project/lotus#11946](https://github.com/filecoin-project/lotus/pull/11946)) +- fix: test: no snap deals in immutable deadlines (#12071) ([filecoin-project/lotus#12071](https://github.com/filecoin-project/lotus/pull/12071)) +- test: actors: manual CC onboarding and proving integration test (#12017) ([filecoin-project/lotus#12017](https://github.com/filecoin-project/lotus/pull/12017)) +- fix: ci: keep lotus checkout clean in the release workflow (#12028) ([filecoin-project/lotus#12028](https://github.com/filecoin-project/lotus/pull/12028)) +- feat!: build: separate miner and node version strings +- chore: lint: address feedback from reviews +- chore: lint: fix lint errors with new linting config +- chore: lint: update golangci lint config +- ci: fix when sorted pr checks workflow is executed +- doc: eth: restore comment lost in linter cleanup +- fix: ci: publish correct docker tags on workflow dispatch (#12060) ([filecoin-project/lotus#12060](https://github.com/filecoin-project/lotus/pull/12060)) +- feat: libp2p: 
Lotus stream cleanup (#11993) ([filecoin-project/lotus#11993](https://github.com/filecoin-project/lotus/pull/11993)) +- Update SupportedProofTypes (#11988) ([filecoin-project/lotus#11988](https://github.com/filecoin-project/lotus/pull/11988)) +- Revert "Update SupportedProofTypes (#11988)" (#11990) ([filecoin-project/lotus#11990](https://github.com/filecoin-project/lotus/pull/11990)) +- chore: docs: Update skeleton guide (#11960) ([filecoin-project/lotus#11960](https://github.com/filecoin-project/lotus/pull/11960)) +- chore: ci: request contents read permissions explicitly in gha (#12055) ([filecoin-project/lotus#12055](https://github.com/filecoin-project/lotus/pull/12055)) +- fix: ci: use custom GITHUB_TOKEN for GoReleaser (#12059) ([filecoin-project/lotus#12059](https://github.com/filecoin-project/lotus/pull/12059)) +- chore: pin golanglint-ci to v1.58.2 (#12054) ([filecoin-project/lotus#12054](https://github.com/filecoin-project/lotus/pull/12054)) +- chore: fix some function names (#12031) ([filecoin-project/lotus#12031](https://github.com/filecoin-project/lotus/pull/12031)) +- src: lint: bump golangci-lint to 1.59, address unchecked fmt.Fprint* +- fix: ci: do not use deprecated --debug goreleaser flag ([filecoin-project/lotus#12086](https://github.com/filecoin-project/lotus/pull/12086)) +- chore: Remove forgotten graphsync references ([filecoin-project/lotus#12084](https://github.com/filecoin-project/lotus/pull/12084)) +- chore: types: remove more items forgotten after markets ([filecoin-project/lotus#12095](https://github.com/filecoin-project/lotus/pull/12095)) +- chore: api: the Net API/CLI now remains only on daemon ([filecoin-project/lotus#12100](https://github.com/filecoin-project/lotus/pull/12100)) +- fix: release: update goreleaser config filei #12120 + +## Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aarsh Shah | 9 | +5710/-35899 | 201 | +| Łukasz Magiera | 21 | +1891/-33776 
| 335 | +| LexLuthr | 9 | +4916/-1637 | 107 | +| Phi-rjan | 9 | +3544/-187 | 92 | +| Rod Vagg | 15 | +2183/-479 | 164 | +| Piotr Galar | 6 | +130/-2386 | 30 | +| Andrew Jackson (Ajax) | 6 | +1072/-533 | 63 | +| ZenGround0 | 1 | +235/-13 | 3 | +| Hubert Bugaj | 3 | +57/-37 | 5 | +| Steven Allen | 3 | +25/-15 | 6 | +| Peter Rabbitson | 1 | +16/-8 | 4 | +| tomfees | 1 | +6/-6 | 5 | +| imxyb | 1 | +6/-0 | 1 | +| yumeiyin | 1 | +2/-2 | 2 | +| galargh | 1 | +2/-2 | 1 | + # v1.27.0 / 2024-05-27 This is an optional feature release of Lotus. Lotus v1.27.0 includes numerous improvements, bugfixes and enhancements for node operators, RPC- and ETH RPC-providers. This feature release also introduces Curio in a Beta release. Check out the Curio Beta release section for how you can get started with Curio. @@ -25,7 +173,6 @@ This release includes a lot of improvements and fixes for indexers, RPC- and ETH - [Length check the array sent to eth_feeHistory RPC](https://github.com/filecoin-project/lotus/pull/11696) - [ETH subscribe tipsets API should only return tipsets that have been executed](https://github.com/filecoin-project/lotus/pull/11858) - [Adjust indexes in event index db to match query patterns](https://github.com/filecoin-project/lotus/pull/111934) -- ## ⭐️ Curio Beta Release ⭐️ @@ -147,7 +294,6 @@ Visit the Curio Official Website insert link - github.com/libp2p/go-libp2p-pubsub (v0.10.0 -> v0.10.1) - github.com/libp2p/go-libp2p (v0.33.2 -> v0.34.1) - ## Others - ci: ci: create gh workflow that runs go checks (#11761) ([filecoin-project/lotus#11761](https://github.com/filecoin-project/lotus/pull/11761)) @@ -172,6 +318,7 @@ Visit the Curio Official Website insert link - chore: Add v13 support to invariants-checker (#11931) ([filecoin-project/lotus#11931](https://github.com/filecoin-project/lotus/pull/11931)) - chore: remove unmaintained bootstrappers (#11983) ([filecoin-project/lotus#11983](https://github.com/filecoin-project/lotus/pull/11983)) - chore: go mod: revert go 
version change as it breaks Docker build (#12050) ([filecoin-project/lotus#12050](https://github.com/filecoin-project/lotus/pull/12050)) +- chore: pin golanglint-ci to v1.58.2 ([filecoin-project/lotus#12054](https://github.com/filecoin-project/lotus/pull/12054)) ## Contributors @@ -4071,7 +4218,6 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd - Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709)) - Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689)) - Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621)) -- chore: pin golanglint-ci to v1.58.2 ([filecoin-project/lotus#12054](https://github.com/filecoin-project/lotus/pull/12054)) ## Contributors diff --git a/Dockerfile b/Dockerfile index 5b77b134afb..51a39ed0395 100644 --- a/Dockerfile +++ b/Dockerfile @@ -109,7 +109,6 @@ COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/ -COPY --from=lotus-builder /opt/filecoin/curio /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/ @@ -118,13 +117,11 @@ RUN mkdir /var/lib/lotus RUN mkdir /var/lib/lotus-miner RUN mkdir /var/lib/lotus-worker RUN mkdir /var/lib/lotus-wallet -RUN mkdir /var/lib/curio RUN chown fc: /var/tmp/filecoin-proof-parameters RUN chown fc: /var/lib/lotus RUN chown fc: /var/lib/lotus-miner RUN chown fc: /var/lib/lotus-worker RUN chown fc: /var/lib/lotus-wallet -RUN chown fc: /var/lib/curio VOLUME /var/tmp/filecoin-proof-parameters @@ -132,7 +129,6 @@ VOLUME /var/lib/lotus VOLUME /var/lib/lotus-miner VOLUME 
/var/lib/lotus-worker VOLUME /var/lib/lotus-wallet -VOLUME /var/lib/curio EXPOSE 1234 EXPOSE 2345 diff --git a/Makefile b/Makefile index 901c8dc00a9..1a81ed4f08b 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ CLEAN+=build/.update-modules deps: $(BUILD_DEPS) .PHONY: deps -build-devnets: build lotus-seed lotus-shed curio sptool +build-devnets: build lotus-seed lotus-shed .PHONY: build-devnets debug: GOFLAGS+=-tags=debug @@ -97,21 +97,6 @@ lotus-miner: $(BUILD_DEPS) .PHONY: lotus-miner BINS+=lotus-miner -curio: $(BUILD_DEPS) - rm -f curio - $(GOCC) build $(GOFLAGS) -o curio ./cmd/curio -.PHONY: curio -BINS+=curio - -cu2k: GOFLAGS+=-tags=2k -cu2k: curio - -sptool: $(BUILD_DEPS) - rm -f sptool - $(GOCC) build $(GOFLAGS) -o sptool ./cmd/sptool -.PHONY: sptool -BINS+=sptool - lotus-worker: $(BUILD_DEPS) rm -f lotus-worker $(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker @@ -130,13 +115,13 @@ lotus-gateway: $(BUILD_DEPS) .PHONY: lotus-gateway BINS+=lotus-gateway -build: lotus lotus-miner lotus-worker curio sptool +build: lotus lotus-miner lotus-worker @[[ $$(type -P "lotus") ]] && echo "Caution: you have \ an existing lotus binary in your PATH. 
This may cause problems if you don't run 'sudo make install'" || true .PHONY: build -install: install-daemon install-miner install-worker install-curio install-sptool +install: install-daemon install-miner install-worker install-daemon: install -C ./lotus /usr/local/bin/lotus @@ -144,12 +129,6 @@ install-daemon: install-miner: install -C ./lotus-miner /usr/local/bin/lotus-miner -install-curio: - install -C ./curio /usr/local/bin/curio - -install-sptool: - install -C ./sptool /usr/local/bin/sptool - install-worker: install -C ./lotus-worker /usr/local/bin/lotus-worker @@ -165,12 +144,6 @@ uninstall-daemon: uninstall-miner: rm -f /usr/local/bin/lotus-miner -uninstall-curio: - rm -f /usr/local/bin/curio - -uninstall-sptool: - rm -f /usr/local/bin/sptool - uninstall-worker: rm -f /usr/local/bin/lotus-worker @@ -272,14 +245,6 @@ install-miner-service: install-miner install-daemon-service @echo "To start the service, run: 'sudo systemctl start lotus-miner'" @echo "To enable the service on startup, run: 'sudo systemctl enable lotus-miner'" -install-curio-service: install-curio install-sptool install-daemon-service - mkdir -p /etc/systemd/system - mkdir -p /var/log/lotus - install -C -m 0644 ./scripts/curio.service /etc/systemd/system/curio.service - systemctl daemon-reload - @echo - @echo "Curio service installed. Don't forget to run 'sudo systemctl start curio' to start it and 'sudo systemctl enable curio' for it to be enabled on startup." 
- install-main-services: install-miner-service install-all-services: install-main-services @@ -298,12 +263,6 @@ clean-miner-service: rm -f /etc/systemd/system/lotus-miner.service systemctl daemon-reload -clean-curio-service: - -systemctl stop curio - -systemctl disable curio - rm -f /etc/systemd/system/curio.service - systemctl daemon-reload - clean-main-services: clean-daemon-service clean-all-services: clean-main-services @@ -378,7 +337,7 @@ docsgen-md-bin: api-gen actors-gen docsgen-openrpc-bin: api-gen actors-gen $(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd -docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-curio +docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-full: docsgen-md-bin ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md @@ -387,8 +346,6 @@ docsgen-md-storage: docsgen-md-bin ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md docsgen-md-worker: docsgen-md-bin ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md -docsgen-md-curio: docsgen-md-bin - ./docgen-md "api/api_curio.go" "Curio" "api" "./api" > documentation/en/api-v0-methods-curio.md docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway @@ -413,47 +370,17 @@ gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen jen: gen -snap: lotus lotus-miner lotus-worker curio sptool +snap: lotus lotus-miner lotus-worker snapcraft # snapcraft upload ./lotus_*.snap # separate from gen because it needs binaries -docsgen-cli: lotus lotus-miner lotus-worker curio sptool +docsgen-cli: lotus lotus-miner lotus-worker python3 ./scripts/generate-lotus-cli.py ./lotus config default > documentation/en/default-lotus-config.toml ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml - ./curio config default > 
documentation/en/default-curio-config.toml .PHONY: docsgen-cli print-%: @echo $*=$($*) -### Curio devnet images -curio_docker_user?=curio -curio_base_image=$(curio_docker_user)/curio-all-in-one:latest-debug -ffi_from_source?=0 - -curio-devnet: lotus lotus-miner lotus-shed lotus-seed curio sptool -.PHONY: curio-devnet - -curio_docker_build_cmd=docker build --build-arg CURIO_TEST_IMAGE=$(curio_base_image) \ - --build-arg FFI_BUILD_FROM_SOURCE=$(ffi_from_source) $(docker_args) - -docker/curio-all-in-one: - $(curio_docker_build_cmd) -f Dockerfile.curio --target curio-all-in-one \ - -t $(curio_base_image) --build-arg GOFLAGS=-tags=debug . -.PHONY: docker/curio-all-in-one - -docker/%: - cd curiosrc/docker/$* && DOCKER_BUILDKIT=1 $(curio_docker_build_cmd) -t $(curio_docker_user)/$*-dev:dev \ - --build-arg BUILD_VERSION=dev . - -docker/curio-devnet: $(lotus_build_cmd) \ - docker/curio-all-in-one docker/lotus docker/lotus-miner docker/curio docker/yugabyte -.PHONY: docker/curio-devnet - -curio-devnet/up: - rm -rf ./curiosrc/docker/data && docker compose -f ./curiosrc/docker/docker-compose.yaml up -d - -curio-devnet/down: - docker compose -f ./curiosrc/docker/docker-compose.yaml down --rmi=local && sleep 2 && rm -rf ./curiosrc/docker/data diff --git a/api/api_curio.go b/api/api_curio.go deleted file mode 100644 index 0eceb484fdb..00000000000 --- a/api/api_curio.go +++ /dev/null @@ -1,34 +0,0 @@ -package api - -import ( - "context" - "net/http" - "net/url" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type Curio interface { - Version(context.Context) (Version, error) //perm:admin - - AllocatePieceToSector(ctx context.Context, maddr address.Address, piece PieceDealInfo, rawSize int64, source url.URL, header http.Header) (SectorOffset, error) //perm:write - - StorageInit(ctx context.Context, path 
string, opts storiface.LocalStorageMeta) error //perm:admin - StorageAddLocal(ctx context.Context, path string) error //perm:admin - StorageDetachLocal(ctx context.Context, path string) error //perm:admin - StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin - StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin - StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin - StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin - StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin - - LogList(ctx context.Context) ([]string, error) //perm:read - LogSetLevel(ctx context.Context, subsystem, level string) error //perm:admin - - // Trigger shutdown - Shutdown(context.Context) error //perm:admin -} diff --git a/api/api_full.go b/api/api_full.go index bbfcae0a2eb..5d2f6d4176e 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -6,16 +6,11 @@ import ( "fmt" "time" - "github.com/google/uuid" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -34,7 +29,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo/imports" ) //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go 
-package=mocks . FullNode @@ -348,74 +342,6 @@ type FullNode interface { // Other - // MethodGroup: Client - // The Client methods all have to do with interacting with the storage and - // retrieval markets as a client - - // ClientImport imports file under the specified path into filestore. - ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin - // ClientRemoveImport removes file import - ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin - // ClientStartDeal proposes a deal with a miner. - ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin - // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. - ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write - // ClientGetDealInfo returns the latest information about a given deal. - ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read - // ClientListDeals returns information about the deals made by the local client. - ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write - // ClientGetDealUpdates returns the status of updated deals - ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write - // ClientGetDealStatus returns status given a code - ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read - // ClientHasLocal indicates whether a certain CID is locally stored. - ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write - // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). - ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read - // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. 
- ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read - // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin - // ClientRetrieveWait waits for retrieval to be complete - ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin - // ClientExport exports a file stored in the local filestore to a system file - ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin - // ClientListRetrievals returns information about retrievals made by the local client - ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write - // ClientGetRetrievalUpdates returns status of updated retrieval deals - ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write - // ClientQueryAsk returns a signed StorageAsk from the specified miner. - ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*StorageAsk, error) //perm:read - // ClientCalcCommP calculates the CommP and data size of the specified CID - ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read - // ClientCalcCommP calculates the CommP for a specified file - ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write - // ClientGenCar generates a CAR file for the specified file. 
- ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write - // ClientDealSize calculates real deal data size - ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read - // ClientListTransfers returns the status of all ongoing transfers of data - ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write - ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write - // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer - ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel - // which are stuck due to insufficient funds - ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write - - // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID - ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write - - // ClientUnimport removes references to the specified file from filestore - // ClientUnimport(path string) - - // ClientListImports lists imported files and their root CIDs - ClientListImports(ctx context.Context) ([]Import, error) //perm:write - - // ClientListAsks() []Ask - // MethodGroup: State // The State methods are used to query, inspect, and interact with chain state. // Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset. 
@@ -898,6 +824,9 @@ type FullNode interface { // Replays all transactions in a block returning the requested traces for each transaction EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read + // Implmements OpenEthereum-compatible API method trace_transaction + EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) //perm:read + // CreateBackup creates node backup onder the specified file name. The // method requires that the lotus daemon is running with the // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that @@ -938,17 +867,6 @@ type EthSubscriber interface { EthSubscription(ctx context.Context, r jsonrpc.RawParams) error // rpc_method:eth_subscription notify:true } -type StorageAsk struct { - Response *storagemarket.StorageAsk - - DealProtocols []string -} - -type FileRef struct { - Path string - IsCAR bool -} - type MinerSectors struct { // Live sectors that should be proven. Live uint64 @@ -958,55 +876,6 @@ type MinerSectors struct { Faulty uint64 } -type ImportRes struct { - Root cid.Cid - ImportID imports.ID -} - -type Import struct { - Key imports.ID - Err string - - Root *cid.Cid - - // Source is the provenance of the import, e.g. "import", "unknown", else. - // Currently useless but may be used in the future. - Source string - - // FilePath is the path of the original file. It is important that the file - // is retained at this path, because it will be referenced during - // the transfer (when we do the UnixFS chunking, we don't duplicate the - // leaves, but rather point to chunks of the original data through - // positional references). - FilePath string - - // CARPath is the path of the CAR file containing the DAG for this import. 
- CARPath string -} - -type DealInfo struct { - ProposalCid cid.Cid - State storagemarket.StorageDealStatus - Message string // more information about deal state, particularly errors - DealStages *storagemarket.DealStages - Provider address.Address - - DataRef *storagemarket.DataRef - PieceCID cid.Cid - Size uint64 - - PricePerEpoch types.BigInt - Duration uint64 - - DealID abi.DealID - - CreationTime time.Time - Verified bool - - TransferChannelID *datatransfer.ChannelID - DataTransfer *DataTransferChannel -} - type MsgLookup struct { Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed Receipt types.MessageReceipt @@ -1128,51 +997,21 @@ type MinerPower struct { HasMinPower bool } -type QueryOffer struct { - Err string - - Root cid.Cid - Piece *cid.Cid - - Size uint64 - MinPrice types.BigInt - UnsealPrice types.BigInt - PricePerByte abi.TokenAmount - PaymentInterval uint64 - PaymentIntervalIncrease uint64 - Miner address.Address - MinerPeer retrievalmarket.RetrievalPeer -} - -func (o *QueryOffer) Order(client address.Address) RetrievalOrder { - return RetrievalOrder{ - Root: o.Root, - Piece: o.Piece, - Size: o.Size, - Total: o.MinPrice, - UnsealPrice: o.UnsealPrice, - PaymentInterval: o.PaymentInterval, - PaymentIntervalIncrease: o.PaymentIntervalIncrease, - Client: client, - - Miner: o.Miner, - MinerPeer: &o.MinerPeer, - } -} - type MarketBalance struct { Escrow big.Int Locked big.Int } type MarketDealState struct { - SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector - LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated - SlashEpoch abi.ChainEpoch // -1 if deal never slashed + SectorNumber abi.SectorNumber // 0 if not yet included in proven sector (0 is also a valid sector number). 
+ SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated + SlashEpoch abi.ChainEpoch // -1 if deal never slashed } func MakeDealState(mds market.DealState) MarketDealState { return MarketDealState{ + SectorNumber: mds.SectorNumber(), SectorStartEpoch: mds.SectorStartEpoch(), LastUpdatedEpoch: mds.LastUpdatedEpoch(), SlashEpoch: mds.SlashEpoch(), @@ -1183,6 +1022,10 @@ type mstate struct { s MarketDealState } +func (m mstate) SectorNumber() abi.SectorNumber { + return m.s.SectorNumber +} + func (m mstate) SectorStartEpoch() abi.ChainEpoch { return m.s.SectorStartEpoch } @@ -1208,27 +1051,6 @@ type MarketDeal struct { State MarketDealState } -type RetrievalOrder struct { - Root cid.Cid - Piece *cid.Cid - DataSelector *Selector - - // todo: Size/Total are only used for calculating price per byte; we should let users just pass that - Size uint64 - Total types.BigInt - - UnsealPrice types.BigInt - PaymentInterval uint64 - PaymentIntervalIncrease uint64 - Client address.Address - Miner address.Address - MinerPeer *retrievalmarket.RetrievalPeer - - RemoteStore *RemoteStoreID `json:"RemoteStore,omitempty"` -} - -type RemoteStoreID = uuid.UUID - type InvocResult struct { MsgCid cid.Cid Msg *types.Message @@ -1239,39 +1061,6 @@ type InvocResult struct { Duration time.Duration } -type MethodCall struct { - types.MessageReceipt - Error string -} - -type StartDealParams struct { - Data *storagemarket.DataRef - Wallet address.Address - Miner address.Address - EpochPrice types.BigInt - MinBlocksDuration uint64 - ProviderCollateral big.Int - DealStartEpoch abi.ChainEpoch - FastRetrieval bool - VerifiedDeal bool -} - -func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) { - type sdpAlias StartDealParams - - sdp := sdpAlias{ - FastRetrieval: true, - } - - if err := json.Unmarshal(raw, &sdp); err != nil { - return err - } - - *s = StartDealParams(sdp) - - return nil -} - type IpldObject 
struct { Cid cid.Cid Obj interface{} @@ -1383,21 +1172,6 @@ type BlockTemplate struct { WinningPoStProof []builtin.PoStProof } -type DataSize struct { - PayloadSize int64 - PieceSize abi.PaddedPieceSize -} - -type DataCIDSize struct { - PayloadSize int64 - PieceSize abi.PaddedPieceSize - PieceCID cid.Cid -} - -type CommPRet struct { - Root cid.Cid - Size abi.UnpaddedPieceSize -} type HeadChange struct { Type string Val *types.TipSet diff --git a/api/api_gateway.go b/api/api_gateway.go index 62bff64cfad..a9f0e3962e6 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -132,6 +132,7 @@ type Gateway interface { Web3ClientVersion(ctx context.Context) (string, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) + EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) diff --git a/api/api_net.go b/api/api_net.go index cfcd8d87e06..3dc19dbed59 100644 --- a/api/api_net.go +++ b/api/api_net.go @@ -66,11 +66,6 @@ type Net interface { ID(context.Context) (peer.ID, error) //perm:read } -type CommonNet interface { - Common - Net -} - type NatInfo struct { Reachability network.Reachability PublicAddrs []string diff --git a/api/api_storage.go b/api/api_storage.go index 410fa2af16c..fa10d28fc32 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -7,14 +7,9 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - 
"github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -44,7 +39,6 @@ import ( // StorageMiner is a low-level interface to the Filecoin network storage miner node type StorageMiner interface { Common - Net ActorAddress(context.Context) (address.Address, error) //perm:read @@ -215,110 +209,12 @@ type StorageMiner interface { StorageDetachLocal(ctx context.Context, path string) error //perm:admin StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin - MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write - MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read - - // MarketListRetrievalDeals is deprecated, returns empty list - MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) //perm:read - MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read - MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read - MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin - MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read - MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin - MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read - MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write - MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write - // MarketDataTransferDiagnostics generates debugging information about 
current data transfers over graphsync - MarketDataTransferDiagnostics(ctx context.Context, p peer.ID) (*TransferDiagnostics, error) //perm:write - // MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - // MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer - MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write - MarketPublishPendingDeals(ctx context.Context) error //perm:admin - MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error //perm:admin - - // DagstoreListShards returns information about all shards known to the - // DAG store. Only available on nodes running the markets subsystem. - DagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:read - - // DagstoreInitializeShard initializes an uninitialized shard. - // - // Initialization consists of fetching the shard's data (deal payload) from - // the storage subsystem, generating an index, and persisting the index - // to facilitate later retrievals, and/or to publish to external sources. - // - // This operation is intended to complement the initial migration. The - // migration registers a shard for every unique piece CID, with lazy - // initialization. Thus, shards are not initialized immediately to avoid - // IO activity competing with proving. Instead, shard are initialized - // when first accessed. This method forces the initialization of a shard by - // accessing it and immediately releasing it. This is useful to warm up the - // cache to facilitate subsequent retrievals, and to generate the indexes - // to publish them externally. 
- // - // This operation fails if the shard is not in ShardStateNew state. - // It blocks until initialization finishes. - DagstoreInitializeShard(ctx context.Context, key string) error //perm:write - - // DagstoreRecoverShard attempts to recover a failed shard. - // - // This operation fails if the shard is not in ShardStateErrored state. - // It blocks until recovery finishes. If recovery failed, it returns the - // error. - DagstoreRecoverShard(ctx context.Context, key string) error //perm:write - - // DagstoreInitializeAll initializes all uninitialized shards in bulk, - // according to the policy passed in the parameters. - // - // It is recommended to set a maximum concurrency to avoid extreme - // IO pressure if the storage subsystem has a large amount of deals. - // - // It returns a stream of events to report progress. - DagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:write - - // DagstoreGC runs garbage collection on the DAG store. - DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin - - // DagstoreRegisterShard registers a shard manually with dagstore with given pieceCID - DagstoreRegisterShard(ctx context.Context, key string) error //perm:admin - - // IndexerAnnounceDeal informs indexer nodes that a new deal was received, - // so they can download its index - IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin - - // IndexerAnnounceAllDeals informs the indexer nodes aboutall active deals. - IndexerAnnounceAllDeals(ctx context.Context) error //perm:admin - - // DagstoreLookupPieces returns information about shards that contain the given CID. - DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]DagstoreShardInfo, error) //perm:admin + MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read // RuntimeSubsystems returns the subsystems that are enabled // in this instance. 
RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read - DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin - DealsList(ctx context.Context) ([]*MarketDeal, error) //perm:admin - DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin - DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin - DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin - DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin - DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin - DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin - DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin - DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin - DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin - - PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read - PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read - PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read - PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read - // CreateBackup creates node backup onder the specified file name. 
The // method requires that the lotus-miner is running with the // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that @@ -471,37 +367,6 @@ type SectorOffset struct { Offset abi.PaddedPieceSize } -// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that -// we expose through JSON-RPC to avoid clients having to depend on the -// dagstore lib. -type DagstoreShardInfo struct { - Key string - State string - Error string -} - -// DagstoreShardResult enumerates results per shard. -type DagstoreShardResult struct { - Key string - Success bool - Error string -} - -type DagstoreInitializeAllParams struct { - MaxConcurrency int - IncludeSealed bool -} - -// DagstoreInitializeAllEvent represents an initialization event. -type DagstoreInitializeAllEvent struct { - Key string - Event string // "start", "end" - Success bool - Error string - Total int - Current int -} - type NumAssignerMeta struct { Reserved bitfield.BitField Allocated bitfield.BitField diff --git a/api/client/client.go b/api/client/client.go index 4a8ff927227..9722c41264d 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -15,19 +15,9 @@ import ( "github.com/filecoin-project/lotus/lib/rpcenc" ) -// NewCurioRpc creates a new http jsonrpc client. -func NewCurioRpc(ctx context.Context, addr string, requestHeader http.Header) (api.Curio, jsonrpc.ClientCloser, error) { - var res v1api.CurioStruct - - closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors)) - - return &res, closer, err -} - // NewCommonRPCV0 creates a new http jsonrpc client. 
-func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) { - var res v0api.CommonNetStruct +func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) { + var res v0api.CommonStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors)) diff --git a/api/docgen-openrpc/openrpc.go b/api/docgen-openrpc/openrpc.go index c9504ba89c1..52e37aec657 100644 --- a/api/docgen-openrpc/openrpc.go +++ b/api/docgen-openrpc/openrpc.go @@ -106,7 +106,7 @@ func NewLotusOpenRPCDocument(Comments, GroupDocs map[string]string) *go_openrpc_ title := "Lotus RPC API" info.Title = (*meta_schema.InfoObjectProperties)(&title) - version := build.BuildVersion + version := build.NodeBuildVersion info.Version = (*meta_schema.InfoObjectVersion)(&version) return info }, diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 2d34a0903cc..cba7bb6b5b3 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -16,8 +16,6 @@ import ( "github.com/google/uuid" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - textselector "github.com/ipld/go-ipld-selector-text-lite" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/metrics" "github.com/libp2p/go-libp2p/core/network" @@ -27,9 +25,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" @@ -44,7 +39,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" 
"github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo/imports" sealing "github.com/filecoin-project/lotus/storage/pipeline" "github.com/filecoin-project/lotus/storage/sealer/sealtasks" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -96,11 +90,6 @@ func init() { addExample(pid) addExample(&pid) - storeIDExample := imports.ID(50) - textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash") - apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash") - clientEvent := retrievalmarket.ClientEventDealAccepted - block := blocks.Block(&blocks.BasicBlock{}) ExampleValues[reflect.TypeOf(&block).Elem()] = block @@ -130,17 +119,7 @@ func init() { addExample(api.FullAPIVersion1) addExample(api.PCHInbound) addExample(time.Minute) - addExample(graphsync.NewRequestID()) - addExample(datatransfer.TransferID(3)) - addExample(datatransfer.Ongoing) - addExample(storeIDExample) - addExample(&storeIDExample) - addExample(clientEvent) - addExample(&clientEvent) - addExample(retrievalmarket.ClientEventDealAccepted) - addExample(retrievalmarket.DealStatusNew) - addExample(&textSelExample) - addExample(&apiSelExample) + addExample(network.ReachabilityPublic) addExample(build.TestNetworkVersion) allocationId := verifreg.AllocationId(0) @@ -206,10 +185,9 @@ func init() { ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr // miner specific - addExample(filestore.Path(".lotusminer/fstmp123")) + si := uint64(12) addExample(&si) - addExample(retrievalmarket.DealID(5)) addExample(map[string]cid.Cid{}) addExample(map[string][]api.SealedRef{ "98000": { @@ -313,17 +291,8 @@ func init() { api.SubsystemMining, api.SubsystemSealing, api.SubsystemSectorStorage, - api.SubsystemMarkets, - }) - addExample(api.DagstoreShardResult{ - Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - Error: "", - }) - 
addExample(api.DagstoreShardInfo{ - Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - State: "ShardStateAvailable", - Error: "", }) + addExample(storiface.ResourceTable) addExample(network.ScopeStat{ Memory: 123, @@ -456,10 +425,6 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r i = &api.GatewayStruct{} t = reflect.TypeOf(new(struct{ api.Gateway })).Elem() permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal)) - case "Curio": - i = &api.CurioStruct{} - t = reflect.TypeOf(new(struct{ api.Curio })).Elem() - permStruct = append(permStruct, reflect.TypeOf(api.CurioStruct{}.Internal)) default: panic("unknown type") } diff --git a/api/eth_aliases.go b/api/eth_aliases.go index eb0c510050e..6a7901cf920 100644 --- a/api/eth_aliases.go +++ b/api/eth_aliases.go @@ -42,6 +42,7 @@ func CreateEthRPCAliases(as apitypes.Aliaser) { as.AliasMethod("trace_block", "Filecoin.EthTraceBlock") as.AliasMethod("trace_replayBlockTransactions", "Filecoin.EthTraceReplayBlockTransactions") + as.AliasMethod("trace_transaction", "Filecoin.EthTraceTransaction") as.AliasMethod("net_version", "Filecoin.NetVersion") as.AliasMethod("net_listening", "Filecoin.NetListening") diff --git a/api/miner_subsystems.go b/api/miner_subsystems.go index a77de7e3c95..2f17ad02f4b 100644 --- a/api/miner_subsystems.go +++ b/api/miner_subsystems.go @@ -13,9 +13,6 @@ const ( // SubsystemUnknown is a placeholder for the zero value. It should never // be used. SubsystemUnknown MinerSubsystem = iota - // SubsystemMarkets signifies the storage and retrieval - // deal-making subsystem. - SubsystemMarkets // SubsystemMining signifies the mining subsystem. SubsystemMining // SubsystemSealing signifies the sealing subsystem. 
@@ -26,7 +23,6 @@ const ( var MinerSubsystemToString = map[MinerSubsystem]string{ SubsystemUnknown: "Unknown", - SubsystemMarkets: "Markets", SubsystemMining: "Mining", SubsystemSealing: "Sealing", SubsystemSectorStorage: "SectorStorage", @@ -34,7 +30,6 @@ var MinerSubsystemToString = map[MinerSubsystem]string{ var MinerSubsystemToID = map[string]MinerSubsystem{ "Unknown": SubsystemUnknown, - "Markets": SubsystemMarkets, "Mining": SubsystemMining, "Sealing": SubsystemSealing, "SectorStorage": SubsystemSectorStorage, diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 9960faeffe5..b15eea34111 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -21,8 +21,6 @@ import ( address "github.com/filecoin-project/go-address" bitfield "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" jsonrpc "github.com/filecoin-project/go-jsonrpc" auth "github.com/filecoin-project/go-jsonrpc/auth" abi "github.com/filecoin-project/go-state-types/abi" @@ -42,7 +40,6 @@ import ( ethtypes "github.com/filecoin-project/lotus/chain/types/ethtypes" alerting "github.com/filecoin-project/lotus/journal/alerting" dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - imports "github.com/filecoin-project/lotus/node/repo/imports" ) // MockFullNode is a mock of FullNode interface. @@ -511,418 +508,6 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) } -// ClientCalcCommP mocks base method. 
-func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1) - ret0, _ := ret[0].(*api.CommPRet) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientCalcCommP indicates an expected call of ClientCalcCommP. -func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1) -} - -// ClientCancelDataTransfer mocks base method. -func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer. -func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3) -} - -// ClientCancelRetrievalDeal mocks base method. -func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal. 
-func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1) -} - -// ClientDataTransferUpdates mocks base method. -func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0) - ret0, _ := ret[0].(<-chan api.DataTransferChannel) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates. -func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0) -} - -// ClientDealPieceCID mocks base method. -func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1) - ret0, _ := ret[0].(api.DataCIDSize) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientDealPieceCID indicates an expected call of ClientDealPieceCID. -func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1) -} - -// ClientDealSize mocks base method. 
-func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1) - ret0, _ := ret[0].(api.DataSize) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientDealSize indicates an expected call of ClientDealSize. -func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) -} - -// ClientExport mocks base method. -func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientExport indicates an expected call of ClientExport. -func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2) -} - -// ClientFindData mocks base method. -func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2) - ret0, _ := ret[0].([]api.QueryOffer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientFindData indicates an expected call of ClientFindData. -func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2) -} - -// ClientGenCar mocks base method. 
-func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientGenCar indicates an expected call of ClientGenCar. -func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2) -} - -// ClientGetDealInfo mocks base method. -func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1) - ret0, _ := ret[0].(*api.DealInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetDealInfo indicates an expected call of ClientGetDealInfo. -func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1) -} - -// ClientGetDealStatus mocks base method. -func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetDealStatus indicates an expected call of ClientGetDealStatus. -func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1) -} - -// ClientGetDealUpdates mocks base method. 
-func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0) - ret0, _ := ret[0].(<-chan api.DealInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates. -func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0) -} - -// ClientGetRetrievalUpdates mocks base method. -func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0) - ret0, _ := ret[0].(<-chan api.RetrievalInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates. -func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0) -} - -// ClientHasLocal mocks base method. -func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientHasLocal indicates an expected call of ClientHasLocal. -func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1) -} - -// ClientImport mocks base method. 
-func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientImport", arg0, arg1) - ret0, _ := ret[0].(*api.ImportRes) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientImport indicates an expected call of ClientImport. -func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1) -} - -// ClientListDataTransfers mocks base method. -func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0) - ret0, _ := ret[0].([]api.DataTransferChannel) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListDataTransfers indicates an expected call of ClientListDataTransfers. -func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0) -} - -// ClientListDeals mocks base method. -func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListDeals", arg0) - ret0, _ := ret[0].([]api.DealInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListDeals indicates an expected call of ClientListDeals. -func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0) -} - -// ClientListImports mocks base method. 
-func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListImports", arg0) - ret0, _ := ret[0].([]api.Import) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListImports indicates an expected call of ClientListImports. -func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0) -} - -// ClientListRetrievals mocks base method. -func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListRetrievals", arg0) - ret0, _ := ret[0].([]api.RetrievalInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListRetrievals indicates an expected call of ClientListRetrievals. -func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0) -} - -// ClientMinerQueryOffer mocks base method. -func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(api.QueryOffer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer. 
-func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3) -} - -// ClientQueryAsk mocks base method. -func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*api.StorageAsk, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2) - ret0, _ := ret[0].(*api.StorageAsk) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientQueryAsk indicates an expected call of ClientQueryAsk. -func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2) -} - -// ClientRemoveImport mocks base method. -func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRemoveImport indicates an expected call of ClientRemoveImport. -func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1) -} - -// ClientRestartDataTransfer mocks base method. -func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer. 
-func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3) -} - -// ClientRetrieve mocks base method. -func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1) - ret0, _ := ret[0].(*api.RestrievalRes) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientRetrieve indicates an expected call of ClientRetrieve. -func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1) -} - -// ClientRetrieveTryRestartInsufficientFunds mocks base method. -func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds. -func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) -} - -// ClientRetrieveWait mocks base method. 
-func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRetrieveWait indicates an expected call of ClientRetrieveWait. -func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1) -} - -// ClientStartDeal mocks base method. -func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1) - ret0, _ := ret[0].(*cid.Cid) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientStartDeal indicates an expected call of ClientStartDeal. -func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1) -} - -// ClientStatelessDeal mocks base method. -func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1) - ret0, _ := ret[0].(*cid.Cid) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientStatelessDeal indicates an expected call of ClientStatelessDeal. -func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1) -} - // Closing mocks base method. 
func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { m.ctrl.T.Helper() @@ -1522,6 +1107,21 @@ func (mr *MockFullNodeMockRecorder) EthTraceReplayBlockTransactions(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceReplayBlockTransactions", reflect.TypeOf((*MockFullNode)(nil).EthTraceReplayBlockTransactions), arg0, arg1, arg2) } +// EthTraceTransaction mocks base method. +func (m *MockFullNode) EthTraceTransaction(arg0 context.Context, arg1 string) ([]*ethtypes.EthTraceTransaction, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EthTraceTransaction", arg0, arg1) + ret0, _ := ret[0].([]*ethtypes.EthTraceTransaction) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EthTraceTransaction indicates an expected call of EthTraceTransaction. +func (mr *MockFullNodeMockRecorder) EthTraceTransaction(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceTransaction", reflect.TypeOf((*MockFullNode)(nil).EthTraceTransaction), arg0, arg1) +} + // EthUninstallFilter mocks base method. 
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index cde8230c4a7..1881a6f9dac 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -5,8 +5,6 @@ package api import ( "context" "encoding/json" - "net/http" - "net/url" "time" "github.com/google/uuid" @@ -20,10 +18,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" @@ -42,7 +36,6 @@ import ( "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo/imports" "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" @@ -98,58 +91,6 @@ type CommonMethods struct { type CommonStub struct { } -type CommonNetStruct struct { - CommonStruct - - NetStruct - - Internal CommonNetMethods -} - -type CommonNetMethods struct { -} - -type CommonNetStub struct { - CommonStub - - NetStub -} - -type CurioStruct struct { - Internal CurioMethods -} - -type CurioMethods struct { - AllocatePieceToSector func(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) `perm:"write"` - - LogList func(p0 context.Context) ([]string, error) `perm:"read"` - - LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"admin"` - - Shutdown func(p0 
context.Context) error `perm:"admin"` - - StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"` - - StorageDetachLocal func(p0 context.Context, p1 string) error `perm:"admin"` - - StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) `perm:"admin"` - - StorageInfo func(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) `perm:"admin"` - - StorageInit func(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error `perm:"admin"` - - StorageList func(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) `perm:"admin"` - - StorageLocal func(p0 context.Context) (map[storiface.ID]string, error) `perm:"admin"` - - StorageStat func(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) `perm:"admin"` - - Version func(p0 context.Context) (Version, error) `perm:"admin"` -} - -type CurioStub struct { -} - type EthSubscriberStruct struct { Internal EthSubscriberMethods } @@ -226,62 +167,6 @@ type FullNodeMethods struct { ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"` - ClientCalcCommP func(p0 context.Context, p1 string) (*CommPRet, error) `perm:"write"` - - ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"` - - ClientDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"` - - ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) `perm:"read"` - - ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"` - - ClientExport func(p0 context.Context, p1 ExportRef, p2 FileRef) error `perm:"admin"` - - ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"` - - ClientGenCar 
func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"` - - ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*DealInfo, error) `perm:"read"` - - ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"` - - ClientGetDealUpdates func(p0 context.Context) (<-chan DealInfo, error) `perm:"write"` - - ClientGetRetrievalUpdates func(p0 context.Context) (<-chan RetrievalInfo, error) `perm:"write"` - - ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` - - ClientImport func(p0 context.Context, p1 FileRef) (*ImportRes, error) `perm:"admin"` - - ClientListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"` - - ClientListDeals func(p0 context.Context) ([]DealInfo, error) `perm:"write"` - - ClientListImports func(p0 context.Context) ([]Import, error) `perm:"write"` - - ClientListRetrievals func(p0 context.Context) ([]RetrievalInfo, error) `perm:"write"` - - ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"` - - ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) `perm:"read"` - - ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"` - - ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - ClientRetrieve func(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) `perm:"admin"` - - ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - - ClientRetrieveWait func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"admin"` - - ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"` - - ClientStatelessDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"write"` - CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` 
EthAccounts func(p0 context.Context) ([]ethtypes.EthAddress, error) `perm:"read"` @@ -358,6 +243,8 @@ type FullNodeMethods struct { EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) `perm:"read"` + EthTraceTransaction func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) `perm:"read"` + EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"` EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"` @@ -788,6 +675,8 @@ type GatewayMethods struct { EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) `` + EthTraceTransaction func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) `` + EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `` EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `` @@ -959,8 +848,6 @@ type SignableStub struct { type StorageMinerStruct struct { CommonStruct - NetStruct - Internal StorageMinerMethods } @@ -985,100 +872,10 @@ type StorageMinerMethods struct { CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` - DagstoreGC func(p0 context.Context) ([]DagstoreShardResult, error) `perm:"admin"` - - DagstoreInitializeAll func(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) `perm:"write"` - - DagstoreInitializeShard func(p0 context.Context, p1 string) error `perm:"write"` - - DagstoreListShards func(p0 context.Context) ([]DagstoreShardInfo, error) `perm:"read"` - - DagstoreLookupPieces func(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) `perm:"admin"` - - DagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"write"` - - DagstoreRegisterShard func(p0 context.Context, p1 string) error `perm:"admin"` - - 
DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderOfflineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderOnlineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderOnlineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderUnverifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsConsiderVerifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` - - DealsImportData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"admin"` - - DealsList func(p0 context.Context) ([]*MarketDeal, error) `perm:"admin"` - - DealsPieceCidBlocklist func(p0 context.Context) ([]cid.Cid, error) `perm:"admin"` - - DealsSetConsiderOfflineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderOfflineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderOnlineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderOnlineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderUnverifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetConsiderVerifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` - - DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"` - - IndexerAnnounceAllDeals func(p0 context.Context) error `perm:"admin"` - - IndexerAnnounceDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` - - MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - MarketDataTransferDiagnostics func(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) `perm:"write"` - - MarketDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"` - - MarketGetAsk func(p0 
context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` - - MarketGetDealUpdates func(p0 context.Context) (<-chan storagemarket.MinerDeal, error) `perm:"read"` - - MarketGetRetrievalAsk func(p0 context.Context) (*retrievalmarket.Ask, error) `perm:"read"` - - MarketImportDealData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"write"` - - MarketListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"` - MarketListDeals func(p0 context.Context) ([]*MarketDeal, error) `perm:"read"` - MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"` - - MarketListRetrievalDeals func(p0 context.Context) ([]struct{}, error) `perm:"read"` - - MarketPendingDeals func(p0 context.Context) (PendingDealInfo, error) `perm:"write"` - - MarketPublishPendingDeals func(p0 context.Context) error `perm:"admin"` - - MarketRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - MarketRetryPublishDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` - - MarketSetAsk func(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error `perm:"admin"` - - MarketSetRetrievalAsk func(p0 context.Context, p1 *retrievalmarket.Ask) error `perm:"admin"` - MiningBase func(p0 context.Context) (*types.TipSet, error) `perm:"read"` - PiecesGetCIDInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"` - - PiecesGetPieceInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"` - - PiecesListCidInfos func(p0 context.Context) ([]cid.Cid, error) `perm:"read"` - - PiecesListPieces func(p0 context.Context) ([]cid.Cid, error) `perm:"read"` - PledgeSector func(p0 context.Context) (abi.SectorID, error) `perm:"write"` RecoverFault func(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) `perm:"admin"` @@ -1234,8 +1031,6 
@@ type StorageMinerMethods struct { type StorageMinerStub struct { CommonStub - - NetStub } type WalletStruct struct { @@ -1498,149 +1293,6 @@ func (s *CommonStub) Version(p0 context.Context) (APIVersion, error) { return *new(APIVersion), ErrNotSupported } -func (s *CurioStruct) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) { - if s.Internal.AllocatePieceToSector == nil { - return *new(SectorOffset), ErrNotSupported - } - return s.Internal.AllocatePieceToSector(p0, p1, p2, p3, p4, p5) -} - -func (s *CurioStub) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) { - return *new(SectorOffset), ErrNotSupported -} - -func (s *CurioStruct) LogList(p0 context.Context) ([]string, error) { - if s.Internal.LogList == nil { - return *new([]string), ErrNotSupported - } - return s.Internal.LogList(p0) -} - -func (s *CurioStub) LogList(p0 context.Context) ([]string, error) { - return *new([]string), ErrNotSupported -} - -func (s *CurioStruct) LogSetLevel(p0 context.Context, p1 string, p2 string) error { - if s.Internal.LogSetLevel == nil { - return ErrNotSupported - } - return s.Internal.LogSetLevel(p0, p1, p2) -} - -func (s *CurioStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error { - return ErrNotSupported -} - -func (s *CurioStruct) Shutdown(p0 context.Context) error { - if s.Internal.Shutdown == nil { - return ErrNotSupported - } - return s.Internal.Shutdown(p0) -} - -func (s *CurioStub) Shutdown(p0 context.Context) error { - return ErrNotSupported -} - -func (s *CurioStruct) StorageAddLocal(p0 context.Context, p1 string) error { - if s.Internal.StorageAddLocal == nil { - return ErrNotSupported - } - return s.Internal.StorageAddLocal(p0, p1) -} - -func (s *CurioStub) StorageAddLocal(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *CurioStruct) 
StorageDetachLocal(p0 context.Context, p1 string) error { - if s.Internal.StorageDetachLocal == nil { - return ErrNotSupported - } - return s.Internal.StorageDetachLocal(p0, p1) -} - -func (s *CurioStub) StorageDetachLocal(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *CurioStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) { - if s.Internal.StorageFindSector == nil { - return *new([]storiface.SectorStorageInfo), ErrNotSupported - } - return s.Internal.StorageFindSector(p0, p1, p2, p3, p4) -} - -func (s *CurioStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) { - return *new([]storiface.SectorStorageInfo), ErrNotSupported -} - -func (s *CurioStruct) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) { - if s.Internal.StorageInfo == nil { - return *new(storiface.StorageInfo), ErrNotSupported - } - return s.Internal.StorageInfo(p0, p1) -} - -func (s *CurioStub) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) { - return *new(storiface.StorageInfo), ErrNotSupported -} - -func (s *CurioStruct) StorageInit(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error { - if s.Internal.StorageInit == nil { - return ErrNotSupported - } - return s.Internal.StorageInit(p0, p1, p2) -} - -func (s *CurioStub) StorageInit(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error { - return ErrNotSupported -} - -func (s *CurioStruct) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) { - if s.Internal.StorageList == nil { - return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported - } - return s.Internal.StorageList(p0) -} - -func (s *CurioStub) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) { - return 
*new(map[storiface.ID][]storiface.Decl), ErrNotSupported -} - -func (s *CurioStruct) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) { - if s.Internal.StorageLocal == nil { - return *new(map[storiface.ID]string), ErrNotSupported - } - return s.Internal.StorageLocal(p0) -} - -func (s *CurioStub) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) { - return *new(map[storiface.ID]string), ErrNotSupported -} - -func (s *CurioStruct) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) { - if s.Internal.StorageStat == nil { - return *new(fsutil.FsStat), ErrNotSupported - } - return s.Internal.StorageStat(p0, p1) -} - -func (s *CurioStub) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) { - return *new(fsutil.FsStat), ErrNotSupported -} - -func (s *CurioStruct) Version(p0 context.Context) (Version, error) { - if s.Internal.Version == nil { - return *new(Version), ErrNotSupported - } - return s.Internal.Version(p0) -} - -func (s *CurioStub) Version(p0 context.Context) (Version, error) { - return *new(Version), ErrNotSupported -} - func (s *EthSubscriberStruct) EthSubscription(p0 context.Context, p1 jsonrpc.RawParams) error { if s.Internal.EthSubscription == nil { return ErrNotSupported @@ -1960,341 +1612,33 @@ func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) return *new(types.BigInt), ErrNotSupported } -func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) { - if s.Internal.ClientCalcCommP == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientCalcCommP(p0, p1) -} - -func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.ClientCancelDataTransfer == nil { +func (s *FullNodeStruct) CreateBackup(p0 
context.Context, p1 string) error { + if s.Internal.CreateBackup == nil { return ErrNotSupported } - return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3) + return s.Internal.CreateBackup(p0, p1) } -func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { +func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error { return ErrNotSupported } -func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { - if s.Internal.ClientCancelRetrievalDeal == nil { - return ErrNotSupported +func (s *FullNodeStruct) EthAccounts(p0 context.Context) ([]ethtypes.EthAddress, error) { + if s.Internal.EthAccounts == nil { + return *new([]ethtypes.EthAddress), ErrNotSupported } - return s.Internal.ClientCancelRetrievalDeal(p0, p1) + return s.Internal.EthAccounts(p0) } -func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { - return ErrNotSupported +func (s *FullNodeStub) EthAccounts(p0 context.Context) ([]ethtypes.EthAddress, error) { + return *new([]ethtypes.EthAddress), ErrNotSupported } -func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - if s.Internal.ClientDataTransferUpdates == nil { - return nil, ErrNotSupported +func (s *FullNodeStruct) EthAddressToFilecoinAddress(p0 context.Context, p1 ethtypes.EthAddress) (address.Address, error) { + if s.Internal.EthAddressToFilecoinAddress == nil { + return *new(address.Address), ErrNotSupported } - return s.Internal.ClientDataTransferUpdates(p0) -} - -func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) { - if s.Internal.ClientDealPieceCID == nil { - return *new(DataCIDSize), ErrNotSupported - } - return 
s.Internal.ClientDealPieceCID(p0, p1) -} - -func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) { - return *new(DataCIDSize), ErrNotSupported -} - -func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) { - if s.Internal.ClientDealSize == nil { - return *new(DataSize), ErrNotSupported - } - return s.Internal.ClientDealSize(p0, p1) -} - -func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) { - return *new(DataSize), ErrNotSupported -} - -func (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error { - if s.Internal.ClientExport == nil { - return ErrNotSupported - } - return s.Internal.ClientExport(p0, p1, p2) -} - -func (s *FullNodeStub) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { - if s.Internal.ClientFindData == nil { - return *new([]QueryOffer), ErrNotSupported - } - return s.Internal.ClientFindData(p0, p1, p2) -} - -func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { - return *new([]QueryOffer), ErrNotSupported -} - -func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error { - if s.Internal.ClientGenCar == nil { - return ErrNotSupported - } - return s.Internal.ClientGenCar(p0, p1, p2) -} - -func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) { - if s.Internal.ClientGetDealInfo == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientGetDealInfo(p0, p1) -} - -func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) 
ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { - if s.Internal.ClientGetDealStatus == nil { - return "", ErrNotSupported - } - return s.Internal.ClientGetDealStatus(p0, p1) -} - -func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { - return "", ErrNotSupported -} - -func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) { - if s.Internal.ClientGetDealUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientGetDealUpdates(p0) -} - -func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { - if s.Internal.ClientGetRetrievalUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientGetRetrievalUpdates(p0) -} - -func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { - if s.Internal.ClientHasLocal == nil { - return false, ErrNotSupported - } - return s.Internal.ClientHasLocal(p0, p1) -} - -func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, ErrNotSupported -} - -func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) { - if s.Internal.ClientImport == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientImport(p0, p1) -} - -func (s *FullNodeStub) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - if s.Internal.ClientListDataTransfers == nil { - return *new([]DataTransferChannel), ErrNotSupported - } - return s.Internal.ClientListDataTransfers(p0) -} - 
-func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - return *new([]DataTransferChannel), ErrNotSupported -} - -func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]DealInfo, error) { - if s.Internal.ClientListDeals == nil { - return *new([]DealInfo), ErrNotSupported - } - return s.Internal.ClientListDeals(p0) -} - -func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]DealInfo, error) { - return *new([]DealInfo), ErrNotSupported -} - -func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]Import, error) { - if s.Internal.ClientListImports == nil { - return *new([]Import), ErrNotSupported - } - return s.Internal.ClientListImports(p0) -} - -func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]Import, error) { - return *new([]Import), ErrNotSupported -} - -func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { - if s.Internal.ClientListRetrievals == nil { - return *new([]RetrievalInfo), ErrNotSupported - } - return s.Internal.ClientListRetrievals(p0) -} - -func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { - return *new([]RetrievalInfo), ErrNotSupported -} - -func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { - if s.Internal.ClientMinerQueryOffer == nil { - return *new(QueryOffer), ErrNotSupported - } - return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) -} - -func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { - return *new(QueryOffer), ErrNotSupported -} - -func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) { - if s.Internal.ClientQueryAsk == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientQueryAsk(p0, p1, p2) -} - -func (s *FullNodeStub) ClientQueryAsk(p0 
context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error { - if s.Internal.ClientRemoveImport == nil { - return ErrNotSupported - } - return s.Internal.ClientRemoveImport(p0, p1) -} - -func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 imports.ID) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.ClientRestartDataTransfer == nil { - return ErrNotSupported - } - return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3) -} - -func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { - if s.Internal.ClientRetrieve == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientRetrieve(p0, p1) -} - -func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { - if s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil { - return ErrNotSupported - } - return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1) -} - -func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { - if s.Internal.ClientRetrieveWait == nil { - return ErrNotSupported - } - return s.Internal.ClientRetrieveWait(p0, p1) -} - -func (s *FullNodeStub) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { - return 
ErrNotSupported -} - -func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { - if s.Internal.ClientStartDeal == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientStartDeal(p0, p1) -} - -func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { - if s.Internal.ClientStatelessDeal == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientStatelessDeal(p0, p1) -} - -func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { - if s.Internal.CreateBackup == nil { - return ErrNotSupported - } - return s.Internal.CreateBackup(p0, p1) -} - -func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) EthAccounts(p0 context.Context) ([]ethtypes.EthAddress, error) { - if s.Internal.EthAccounts == nil { - return *new([]ethtypes.EthAddress), ErrNotSupported - } - return s.Internal.EthAccounts(p0) -} - -func (s *FullNodeStub) EthAccounts(p0 context.Context) ([]ethtypes.EthAddress, error) { - return *new([]ethtypes.EthAddress), ErrNotSupported -} - -func (s *FullNodeStruct) EthAddressToFilecoinAddress(p0 context.Context, p1 ethtypes.EthAddress) (address.Address, error) { - if s.Internal.EthAddressToFilecoinAddress == nil { - return *new(address.Address), ErrNotSupported - } - return s.Internal.EthAddressToFilecoinAddress(p0, p1) + return s.Internal.EthAddressToFilecoinAddress(p0, p1) } func (s *FullNodeStub) EthAddressToFilecoinAddress(p0 context.Context, p1 ethtypes.EthAddress) (address.Address, error) { @@ -2686,6 +2030,17 @@ func (s *FullNodeStub) EthTraceReplayBlockTransactions(p0 context.Context, 
p1 st return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported } +func (s *FullNodeStruct) EthTraceTransaction(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) { + if s.Internal.EthTraceTransaction == nil { + return *new([]*ethtypes.EthTraceTransaction), ErrNotSupported + } + return s.Internal.EthTraceTransaction(p0, p1) +} + +func (s *FullNodeStub) EthTraceTransaction(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) { + return *new([]*ethtypes.EthTraceTransaction), ErrNotSupported +} + func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) { if s.Internal.EthUninstallFilter == nil { return false, ErrNotSupported @@ -4985,6 +4340,17 @@ func (s *GatewayStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 str return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported } +func (s *GatewayStruct) EthTraceTransaction(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) { + if s.Internal.EthTraceTransaction == nil { + return *new([]*ethtypes.EthTraceTransaction), ErrNotSupported + } + return s.Internal.EthTraceTransaction(p0, p1) +} + +func (s *GatewayStub) EthTraceTransaction(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) { + return *new([]*ethtypes.EthTraceTransaction), ErrNotSupported +} + func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) { if s.Internal.EthUninstallFilter == nil { return false, ErrNotSupported @@ -5898,369 +5264,6 @@ func (s *StorageMinerStub) CreateBackup(p0 context.Context, p1 string) error { return ErrNotSupported } -func (s *StorageMinerStruct) DagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) { - if s.Internal.DagstoreGC == nil { - return *new([]DagstoreShardResult), ErrNotSupported - } - return s.Internal.DagstoreGC(p0) -} - -func (s *StorageMinerStub) DagstoreGC(p0 context.Context) ([]DagstoreShardResult, 
error) { - return *new([]DagstoreShardResult), ErrNotSupported -} - -func (s *StorageMinerStruct) DagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) { - if s.Internal.DagstoreInitializeAll == nil { - return nil, ErrNotSupported - } - return s.Internal.DagstoreInitializeAll(p0, p1) -} - -func (s *StorageMinerStub) DagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) DagstoreInitializeShard(p0 context.Context, p1 string) error { - if s.Internal.DagstoreInitializeShard == nil { - return ErrNotSupported - } - return s.Internal.DagstoreInitializeShard(p0, p1) -} - -func (s *StorageMinerStub) DagstoreInitializeShard(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) { - if s.Internal.DagstoreListShards == nil { - return *new([]DagstoreShardInfo), ErrNotSupported - } - return s.Internal.DagstoreListShards(p0) -} - -func (s *StorageMinerStub) DagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) { - return *new([]DagstoreShardInfo), ErrNotSupported -} - -func (s *StorageMinerStruct) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) { - if s.Internal.DagstoreLookupPieces == nil { - return *new([]DagstoreShardInfo), ErrNotSupported - } - return s.Internal.DagstoreLookupPieces(p0, p1) -} - -func (s *StorageMinerStub) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) { - return *new([]DagstoreShardInfo), ErrNotSupported -} - -func (s *StorageMinerStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error { - if s.Internal.DagstoreRecoverShard == nil { - return ErrNotSupported - } - return s.Internal.DagstoreRecoverShard(p0, p1) -} - -func (s *StorageMinerStub) DagstoreRecoverShard(p0 
context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DagstoreRegisterShard(p0 context.Context, p1 string) error { - if s.Internal.DagstoreRegisterShard == nil { - return ErrNotSupported - } - return s.Internal.DagstoreRegisterShard(p0, p1) -} - -func (s *StorageMinerStub) DagstoreRegisterShard(p0 context.Context, p1 string) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOfflineRetrievalDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOfflineRetrievalDeals(p0) -} - -func (s *StorageMinerStub) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *StorageMinerStruct) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOfflineStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOfflineStorageDeals(p0) -} - -func (s *StorageMinerStub) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOnlineRetrievalDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOnlineRetrievalDeals(p0) -} - -func (s *StorageMinerStub) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *StorageMinerStruct) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderOnlineStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderOnlineStorageDeals(p0) -} - -func (s *StorageMinerStub) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s 
*StorageMinerStruct) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderUnverifiedStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderUnverifiedStorageDeals(p0) -} - -func (s *StorageMinerStub) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *StorageMinerStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { - if s.Internal.DealsConsiderVerifiedStorageDeals == nil { - return false, ErrNotSupported - } - return s.Internal.DealsConsiderVerifiedStorageDeals(p0) -} - -func (s *StorageMinerStub) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { - return false, ErrNotSupported -} - -func (s *StorageMinerStruct) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error { - if s.Internal.DealsImportData == nil { - return ErrNotSupported - } - return s.Internal.DealsImportData(p0, p1, p2) -} - -func (s *StorageMinerStub) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsList(p0 context.Context) ([]*MarketDeal, error) { - if s.Internal.DealsList == nil { - return *new([]*MarketDeal), ErrNotSupported - } - return s.Internal.DealsList(p0) -} - -func (s *StorageMinerStub) DealsList(p0 context.Context) ([]*MarketDeal, error) { - return *new([]*MarketDeal), ErrNotSupported -} - -func (s *StorageMinerStruct) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { - if s.Internal.DealsPieceCidBlocklist == nil { - return *new([]cid.Cid), ErrNotSupported - } - return s.Internal.DealsPieceCidBlocklist(p0) -} - -func (s *StorageMinerStub) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { - return *new([]cid.Cid), ErrNotSupported -} - -func (s *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { - if 
s.Internal.DealsSetConsiderOfflineRetrievalDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1) -} - -func (s *StorageMinerStub) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderOfflineStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1) -} - -func (s *StorageMinerStub) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderOnlineRetrievalDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1) -} - -func (s *StorageMinerStub) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderOnlineStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1) -} - -func (s *StorageMinerStub) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { - if s.Internal.DealsSetConsiderUnverifiedStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1) -} - -func (s *StorageMinerStub) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { - if 
s.Internal.DealsSetConsiderVerifiedStorageDeals == nil { - return ErrNotSupported - } - return s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1) -} - -func (s *StorageMinerStub) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { - if s.Internal.DealsSetPieceCidBlocklist == nil { - return ErrNotSupported - } - return s.Internal.DealsSetPieceCidBlocklist(p0, p1) -} - -func (s *StorageMinerStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) IndexerAnnounceAllDeals(p0 context.Context) error { - if s.Internal.IndexerAnnounceAllDeals == nil { - return ErrNotSupported - } - return s.Internal.IndexerAnnounceAllDeals(p0) -} - -func (s *StorageMinerStub) IndexerAnnounceAllDeals(p0 context.Context) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error { - if s.Internal.IndexerAnnounceDeal == nil { - return ErrNotSupported - } - return s.Internal.IndexerAnnounceDeal(p0, p1) -} - -func (s *StorageMinerStub) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.MarketCancelDataTransfer == nil { - return ErrNotSupported - } - return s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3) -} - -func (s *StorageMinerStub) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) { - if s.Internal.MarketDataTransferDiagnostics == nil { - return nil, ErrNotSupported - } - return 
s.Internal.MarketDataTransferDiagnostics(p0, p1) -} - -func (s *StorageMinerStub) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - if s.Internal.MarketDataTransferUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.MarketDataTransferUpdates(p0) -} - -func (s *StorageMinerStub) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { - if s.Internal.MarketGetAsk == nil { - return nil, ErrNotSupported - } - return s.Internal.MarketGetAsk(p0) -} - -func (s *StorageMinerStub) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) { - if s.Internal.MarketGetDealUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.MarketGetDealUpdates(p0) -} - -func (s *StorageMinerStub) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) { - if s.Internal.MarketGetRetrievalAsk == nil { - return nil, ErrNotSupported - } - return s.Internal.MarketGetRetrievalAsk(p0) -} - -func (s *StorageMinerStub) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { - if s.Internal.MarketImportDealData == nil { - return ErrNotSupported - } - return s.Internal.MarketImportDealData(p0, p1, p2) -} - -func (s *StorageMinerStub) 
MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - if s.Internal.MarketListDataTransfers == nil { - return *new([]DataTransferChannel), ErrNotSupported - } - return s.Internal.MarketListDataTransfers(p0) -} - -func (s *StorageMinerStub) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { - return *new([]DataTransferChannel), ErrNotSupported -} - func (s *StorageMinerStruct) MarketListDeals(p0 context.Context) ([]*MarketDeal, error) { if s.Internal.MarketListDeals == nil { return *new([]*MarketDeal), ErrNotSupported @@ -6272,94 +5275,6 @@ func (s *StorageMinerStub) MarketListDeals(p0 context.Context) ([]*MarketDeal, e return *new([]*MarketDeal), ErrNotSupported } -func (s *StorageMinerStruct) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { - if s.Internal.MarketListIncompleteDeals == nil { - return *new([]storagemarket.MinerDeal), ErrNotSupported - } - return s.Internal.MarketListIncompleteDeals(p0) -} - -func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { - return *new([]storagemarket.MinerDeal), ErrNotSupported -} - -func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) { - if s.Internal.MarketListRetrievalDeals == nil { - return *new([]struct{}), ErrNotSupported - } - return s.Internal.MarketListRetrievalDeals(p0) -} - -func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) { - return *new([]struct{}), ErrNotSupported -} - -func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) { - if s.Internal.MarketPendingDeals == nil { - return *new(PendingDealInfo), ErrNotSupported - } - return s.Internal.MarketPendingDeals(p0) -} - -func (s *StorageMinerStub) MarketPendingDeals(p0 
context.Context) (PendingDealInfo, error) { - return *new(PendingDealInfo), ErrNotSupported -} - -func (s *StorageMinerStruct) MarketPublishPendingDeals(p0 context.Context) error { - if s.Internal.MarketPublishPendingDeals == nil { - return ErrNotSupported - } - return s.Internal.MarketPublishPendingDeals(p0) -} - -func (s *StorageMinerStub) MarketPublishPendingDeals(p0 context.Context) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.MarketRestartDataTransfer == nil { - return ErrNotSupported - } - return s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3) -} - -func (s *StorageMinerStub) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error { - if s.Internal.MarketRetryPublishDeal == nil { - return ErrNotSupported - } - return s.Internal.MarketRetryPublishDeal(p0, p1) -} - -func (s *StorageMinerStub) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { - if s.Internal.MarketSetAsk == nil { - return ErrNotSupported - } - return s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5) -} - -func (s *StorageMinerStub) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { - return ErrNotSupported -} - -func (s *StorageMinerStruct) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { - if s.Internal.MarketSetRetrievalAsk == nil { - return ErrNotSupported - } - return s.Internal.MarketSetRetrievalAsk(p0, p1) -} - -func (s *StorageMinerStub) 
MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { - return ErrNotSupported -} - func (s *StorageMinerStruct) MiningBase(p0 context.Context) (*types.TipSet, error) { if s.Internal.MiningBase == nil { return nil, ErrNotSupported @@ -6371,50 +5286,6 @@ func (s *StorageMinerStub) MiningBase(p0 context.Context) (*types.TipSet, error) return nil, ErrNotSupported } -func (s *StorageMinerStruct) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) { - if s.Internal.PiecesGetCIDInfo == nil { - return nil, ErrNotSupported - } - return s.Internal.PiecesGetCIDInfo(p0, p1) -} - -func (s *StorageMinerStub) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) { - if s.Internal.PiecesGetPieceInfo == nil { - return nil, ErrNotSupported - } - return s.Internal.PiecesGetPieceInfo(p0, p1) -} - -func (s *StorageMinerStub) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) { - return nil, ErrNotSupported -} - -func (s *StorageMinerStruct) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) { - if s.Internal.PiecesListCidInfos == nil { - return *new([]cid.Cid), ErrNotSupported - } - return s.Internal.PiecesListCidInfos(p0) -} - -func (s *StorageMinerStub) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) { - return *new([]cid.Cid), ErrNotSupported -} - -func (s *StorageMinerStruct) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) { - if s.Internal.PiecesListPieces == nil { - return *new([]cid.Cid), ErrNotSupported - } - return s.Internal.PiecesListPieces(p0) -} - -func (s *StorageMinerStub) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) { - return *new([]cid.Cid), ErrNotSupported -} - func (s *StorageMinerStruct) PledgeSector(p0 context.Context) (abi.SectorID, error) { if s.Internal.PledgeSector == nil { 
return *new(abi.SectorID), ErrNotSupported @@ -7737,8 +6608,6 @@ func (s *WorkerStub) WaitQuiet(p0 context.Context) error { var _ ChainIO = new(ChainIOStruct) var _ Common = new(CommonStruct) -var _ CommonNet = new(CommonNetStruct) -var _ Curio = new(CurioStruct) var _ EthSubscriber = new(EthSubscriberStruct) var _ FullNode = new(FullNodeStruct) var _ Gateway = new(GatewayStruct) diff --git a/api/types.go b/api/types.go index b7dbe7b3625..61f6cb8bafe 100644 --- a/api/types.go +++ b/api/types.go @@ -1,23 +1,15 @@ package api import ( - "encoding/json" - "fmt" "time" "github.com/google/uuid" "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/codec/dagjson" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -25,27 +17,6 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -type MultiaddrSlice []ma.Multiaddr - -func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) { - var temp []string - if err := json.Unmarshal(raw, &temp); err != nil { - return err - } - - res := make([]ma.Multiaddr, len(temp)) - for i, str := range temp { - res[i], err = ma.NewMultiaddr(str) - if err != nil { - return err - } - } - *m = res - return nil -} - -var _ json.Unmarshaler = new(MultiaddrSlice) - type ObjStat struct { Size uint64 Links uint64 @@ -69,71 +40,6 @@ type MessageSendSpec struct { MaximizeFeeCap bool } -// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync -type GraphSyncDataTransfer struct { - // GraphSync request id for this transfer - RequestID 
*graphsync.RequestID - // Graphsync state for this transfer - RequestState string - // If a channel ID is present, indicates whether this is the current graphsync request for this channel - // (could have changed in a restart) - IsCurrentChannelRequest bool - // Data transfer channel ID for this transfer - ChannelID *datatransfer.ChannelID - // Data transfer state for this transfer - ChannelState *DataTransferChannel - // Diagnostic information about this request -- and unexpected inconsistencies in - // request state - Diagnostics []string -} - -// TransferDiagnostics give current information about transfers going over graphsync that may be helpful for debugging -type TransferDiagnostics struct { - ReceivingTransfers []*GraphSyncDataTransfer - SendingTransfers []*GraphSyncDataTransfer -} - -type DataTransferChannel struct { - TransferID datatransfer.TransferID - Status datatransfer.Status - BaseCID cid.Cid - IsInitiator bool - IsSender bool - Voucher string - Message string - OtherPeer peer.ID - Transferred uint64 - Stages *datatransfer.ChannelStages -} - -// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id -func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) DataTransferChannel { - channel := DataTransferChannel{ - TransferID: channelState.TransferID(), - Status: channelState.Status(), - BaseCID: channelState.BaseCID(), - IsSender: channelState.Sender() == hostID, - Message: channelState.Message(), - } - voucher := channelState.Voucher() - voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode) - if err != nil { - channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error() - } else { - channel.Voucher = string(voucherJSON) - } - if channel.IsSender { - channel.IsInitiator = !channelState.IsPull() - channel.Transferred = channelState.Sent() - channel.OtherPeer = channelState.Recipient() - } else { - channel.IsInitiator = channelState.IsPull() - 
channel.Transferred = channelState.Received() - channel.OtherPeer = channelState.Sender() - } - return channel -} - type NetStat struct { System *network.ScopeStat `json:",omitempty"` Transient *network.ScopeStat `json:",omitempty"` @@ -229,67 +135,6 @@ type MessagePrototype struct { ValidNonce bool } -type RetrievalInfo struct { - PayloadCID cid.Cid - ID retrievalmarket.DealID - PieceCID *cid.Cid - PricePerByte abi.TokenAmount - UnsealPrice abi.TokenAmount - - Status retrievalmarket.DealStatus - Message string // more information about deal state, particularly errors - Provider peer.ID - BytesReceived uint64 - BytesPaidFor uint64 - TotalPaid abi.TokenAmount - - TransferChannelID *datatransfer.ChannelID - DataTransfer *DataTransferChannel - - // optional event if part of ClientGetRetrievalUpdates - Event *retrievalmarket.ClientEvent -} - -type RestrievalRes struct { - DealID retrievalmarket.DealID -} - -// Selector specifies ipld selector string -// - if the string starts with '{', it's interpreted as json selector string -// see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/ -// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path) -// see https://github.com/ipld/go-ipld-selector-text-lite -type Selector string - -type DagSpec struct { - // DataSelector matches data to be retrieved - // - when using textselector, the path specifies subtree - // - the matched graph must have a single root - DataSelector *Selector - - // ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector - // When true, in addition to the selection target, the resulting CAR will contain every block along the - // path back to, and including the original root - // When false the resulting CAR contains only the blocks of the target subdag - ExportMerkleProof bool -} - -type ExportRef struct { - Root cid.Cid - - // DAGs array specifies a list of DAGs to export - // - If exporting 
into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node - // - If exporting into a car file - // - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root - // - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car - // - When not specified defaults to a single DAG: - // - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}` - DAGs []DagSpec - - FromLocalCAR string // if specified, get data from a local CARv2 file. - DealID retrievalmarket.DealID -} - type MinerInfo struct { Owner address.Address // Must be an ID-address. Worker address.Address // Must be an ID-address. @@ -346,6 +191,7 @@ type ForkUpgradeParams struct { UpgradeWatermelonHeight abi.ChainEpoch UpgradeDragonHeight abi.ChainEpoch UpgradePhoenixHeight abi.ChainEpoch + UpgradeAussieHeight abi.ChainEpoch } // ChainExportConfig holds configuration for chain ranged exports. 
diff --git a/api/v0api/full.go b/api/v0api/full.go index b61fc157025..334c5c56dab 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -5,14 +5,9 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - textselector "github.com/ipld/go-ipld-selector-text-lite" - "github.com/libp2p/go-libp2p/core/peer" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v8/paych" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" @@ -24,9 +19,7 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo/imports" ) //go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode @@ -305,74 +298,6 @@ type FullNode interface { WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read // Other - - // MethodGroup: Client - // The Client methods all have to do with interacting with the storage and - // retrieval markets as a client - - // ClientImport imports file under the specified path into filestore. - ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin - // ClientRemoveImport removes file import - ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin - // ClientStartDeal proposes a deal with a miner. 
- ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin - // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. - ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write - // ClientGetDealInfo returns the latest information about a given deal. - ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read - // ClientListDeals returns information about the deals made by the local client. - ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write - // ClientGetDealUpdates returns the status of updated deals - ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write - // ClientGetDealStatus returns status given a code - ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read - // ClientHasLocal indicates whether a certain CID is locally stored. - ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write - // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). - ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read - // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. - ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read - // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin - // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel - // of status updates. 
- ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin - // ClientQueryAsk returns a signed StorageAsk from the specified miner. - // ClientListRetrievals returns information about retrievals made by the local client - ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write - // ClientGetRetrievalUpdates returns status of updated retrieval deals - ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write - ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read - // ClientCalcCommP calculates the CommP and data size of the specified CID - ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read - // ClientCalcCommP calculates the CommP for a specified file - ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write - // ClientGenCar generates a CAR file for the specified file. 
- ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write - // ClientDealSize calculates real deal data size - ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read - // ClientListTransfers returns the status of all ongoing transfers of data - ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write - ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write - // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer - ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write - // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel - // which are stuck due to insufficient funds - ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write - - // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID - ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write - - // ClientUnimport removes references to the specified file from filestore - // ClientUnimport(path string) - - // ClientListImports lists imported files and their root CIDs - ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write - - // ClientListAsks() []Ask - // MethodGroup: State // The State methods are used to query, inspect, and interact with chain state. // Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset. 
@@ -743,37 +668,3 @@ type FullNode interface { // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin } - -func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder { - return RetrievalOrder{ - Root: o.Root, - Piece: o.Piece, - Size: o.Size, - Total: o.MinPrice, - UnsealPrice: o.UnsealPrice, - PaymentInterval: o.PaymentInterval, - PaymentIntervalIncrease: o.PaymentIntervalIncrease, - Client: client, - - Miner: o.Miner, - MinerPeer: &o.MinerPeer, - } -} - -type RetrievalOrder struct { - // TODO: make this less unixfs specific - Root cid.Cid - Piece *cid.Cid - DatamodelPathSelector *textselector.Expression - Size uint64 - - FromLocalCAR string // if specified, get data from a local CARv2 file. - // TODO: support offset - Total types.BigInt - UnsealPrice types.BigInt - PaymentInterval uint64 - PaymentIntervalIncrease uint64 - Client address.Address - Miner address.Address - MinerPeer *retrievalmarket.RetrievalPeer -} diff --git a/api/v0api/latest.go b/api/v0api/latest.go index d423f57bc86..d5da794ebcb 100644 --- a/api/v0api/latest.go +++ b/api/v0api/latest.go @@ -6,14 +6,11 @@ import ( type Common = api.Common type Net = api.Net -type CommonNet = api.CommonNet type CommonStruct = api.CommonStruct type CommonStub = api.CommonStub type NetStruct = api.NetStruct type NetStub = api.NetStub -type CommonNetStruct = api.CommonNetStruct -type CommonNetStub = api.CommonNetStub type StorageMiner = api.StorageMiner type StorageMinerStruct = api.StorageMinerStruct diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index 90c25d4a774..a8756894951 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -7,14 +7,10 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer 
"github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v8/paych" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" @@ -26,9 +22,7 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo/imports" ) var ErrNotSupported = xerrors.New("method not supported") @@ -90,60 +84,6 @@ type FullNodeMethods struct { ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"` - ClientCalcCommP func(p0 context.Context, p1 string) (*api.CommPRet, error) `perm:"write"` - - ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"` - - ClientDataTransferUpdates func(p0 context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` - - ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) `perm:"read"` - - ClientDealSize func(p0 context.Context, p1 cid.Cid) (api.DataSize, error) `perm:"read"` - - ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) `perm:"read"` - - ClientGenCar func(p0 context.Context, p1 api.FileRef, p2 string) error `perm:"write"` - - ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) `perm:"read"` - - ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"` - - ClientGetDealUpdates func(p0 context.Context) 
(<-chan api.DealInfo, error) `perm:"write"` - - ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"` - - ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` - - ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"` - - ClientListDataTransfers func(p0 context.Context) ([]api.DataTransferChannel, error) `perm:"write"` - - ClientListDeals func(p0 context.Context) ([]api.DealInfo, error) `perm:"write"` - - ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"` - - ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"` - - ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"` - - ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` - - ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"` - - ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - - ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` - - ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - - ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` - - ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` - - ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"` - CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` @@ -796,303 +736,6 @@ func (s *FullNodeStub) 
ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) return *new(types.BigInt), ErrNotSupported } -func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) { - if s.Internal.ClientCalcCommP == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientCalcCommP(p0, p1) -} - -func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.ClientCancelDataTransfer == nil { - return ErrNotSupported - } - return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3) -} - -func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { - if s.Internal.ClientCancelRetrievalDeal == nil { - return ErrNotSupported - } - return s.Internal.ClientCancelRetrievalDeal(p0, p1) -} - -func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) { - if s.Internal.ClientDataTransferUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientDataTransferUpdates(p0) -} - -func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) { - if s.Internal.ClientDealPieceCID == nil { - return *new(api.DataCIDSize), ErrNotSupported - } - return s.Internal.ClientDealPieceCID(p0, p1) -} - -func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) 
(api.DataCIDSize, error) { - return *new(api.DataCIDSize), ErrNotSupported -} - -func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) { - if s.Internal.ClientDealSize == nil { - return *new(api.DataSize), ErrNotSupported - } - return s.Internal.ClientDealSize(p0, p1) -} - -func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) { - return *new(api.DataSize), ErrNotSupported -} - -func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) { - if s.Internal.ClientFindData == nil { - return *new([]api.QueryOffer), ErrNotSupported - } - return s.Internal.ClientFindData(p0, p1, p2) -} - -func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) { - return *new([]api.QueryOffer), ErrNotSupported -} - -func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error { - if s.Internal.ClientGenCar == nil { - return ErrNotSupported - } - return s.Internal.ClientGenCar(p0, p1, p2) -} - -func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) { - if s.Internal.ClientGetDealInfo == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientGetDealInfo(p0, p1) -} - -func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { - if s.Internal.ClientGetDealStatus == nil { - return "", ErrNotSupported - } - return s.Internal.ClientGetDealStatus(p0, p1) -} - -func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { - return "", ErrNotSupported -} - -func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) 
(<-chan api.DealInfo, error) { - if s.Internal.ClientGetDealUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientGetDealUpdates(p0) -} - -func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { - if s.Internal.ClientGetRetrievalUpdates == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientGetRetrievalUpdates(p0) -} - -func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { - if s.Internal.ClientHasLocal == nil { - return false, ErrNotSupported - } - return s.Internal.ClientHasLocal(p0, p1) -} - -func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { - return false, ErrNotSupported -} - -func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) { - if s.Internal.ClientImport == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientImport(p0, p1) -} - -func (s *FullNodeStub) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) { - if s.Internal.ClientListDataTransfers == nil { - return *new([]api.DataTransferChannel), ErrNotSupported - } - return s.Internal.ClientListDataTransfers(p0) -} - -func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) { - return *new([]api.DataTransferChannel), ErrNotSupported -} - -func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) { - if s.Internal.ClientListDeals == nil { - return *new([]api.DealInfo), ErrNotSupported - } - return 
s.Internal.ClientListDeals(p0) -} - -func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) { - return *new([]api.DealInfo), ErrNotSupported -} - -func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]api.Import, error) { - if s.Internal.ClientListImports == nil { - return *new([]api.Import), ErrNotSupported - } - return s.Internal.ClientListImports(p0) -} - -func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, error) { - return *new([]api.Import), ErrNotSupported -} - -func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { - if s.Internal.ClientListRetrievals == nil { - return *new([]api.RetrievalInfo), ErrNotSupported - } - return s.Internal.ClientListRetrievals(p0) -} - -func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { - return *new([]api.RetrievalInfo), ErrNotSupported -} - -func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) { - if s.Internal.ClientMinerQueryOffer == nil { - return *new(api.QueryOffer), ErrNotSupported - } - return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) -} - -func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) { - return *new(api.QueryOffer), ErrNotSupported -} - -func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { - if s.Internal.ClientQueryAsk == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientQueryAsk(p0, p1, p2) -} - -func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error { - if s.Internal.ClientRemoveImport == nil { - return 
ErrNotSupported - } - return s.Internal.ClientRemoveImport(p0, p1) -} - -func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 imports.ID) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - if s.Internal.ClientRestartDataTransfer == nil { - return ErrNotSupported - } - return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3) -} - -func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { - if s.Internal.ClientRetrieve == nil { - return ErrNotSupported - } - return s.Internal.ClientRetrieve(p0, p1, p2) -} - -func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { - if s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil { - return ErrNotSupported - } - return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1) -} - -func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { - return ErrNotSupported -} - -func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - if s.Internal.ClientRetrieveWithEvents == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) -} - -func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, 
error) { - if s.Internal.ClientStartDeal == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientStartDeal(p0, p1) -} - -func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { - return nil, ErrNotSupported -} - -func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { - if s.Internal.ClientStatelessDeal == nil { - return nil, ErrNotSupported - } - return s.Internal.ClientStatelessDeal(p0, p1) -} - -func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { - return nil, ErrNotSupported -} - func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { if s.Internal.CreateBackup == nil { return ErrNotSupported diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index df67d087656..092d93b67bf 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -20,9 +20,6 @@ import ( address "github.com/filecoin-project/go-address" bitfield "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" - storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" auth "github.com/filecoin-project/go-jsonrpc/auth" abi "github.com/filecoin-project/go-state-types/abi" big "github.com/filecoin-project/go-state-types/big" @@ -36,13 +33,10 @@ import ( api "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" - v0api "github.com/filecoin-project/lotus/api/v0api" miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner" types "github.com/filecoin-project/lotus/chain/types" alerting "github.com/filecoin-project/lotus/journal/alerting" - marketevents "github.com/filecoin-project/lotus/markets/loggers" dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - imports 
"github.com/filecoin-project/lotus/node/repo/imports" ) // MockFullNode is a mock of FullNode interface. @@ -455,404 +449,6 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) } -// ClientCalcCommP mocks base method. -func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1) - ret0, _ := ret[0].(*api.CommPRet) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientCalcCommP indicates an expected call of ClientCalcCommP. -func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1) -} - -// ClientCancelDataTransfer mocks base method. -func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer. -func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3) -} - -// ClientCancelRetrievalDeal mocks base method. 
-func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal. -func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1) -} - -// ClientDataTransferUpdates mocks base method. -func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0) - ret0, _ := ret[0].(<-chan api.DataTransferChannel) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates. -func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0) -} - -// ClientDealPieceCID mocks base method. -func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1) - ret0, _ := ret[0].(api.DataCIDSize) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientDealPieceCID indicates an expected call of ClientDealPieceCID. 
-func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1) -} - -// ClientDealSize mocks base method. -func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1) - ret0, _ := ret[0].(api.DataSize) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientDealSize indicates an expected call of ClientDealSize. -func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) -} - -// ClientFindData mocks base method. -func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2) - ret0, _ := ret[0].([]api.QueryOffer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientFindData indicates an expected call of ClientFindData. -func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2) -} - -// ClientGenCar mocks base method. -func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientGenCar indicates an expected call of ClientGenCar. 
-func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2) -} - -// ClientGetDealInfo mocks base method. -func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1) - ret0, _ := ret[0].(*api.DealInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetDealInfo indicates an expected call of ClientGetDealInfo. -func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1) -} - -// ClientGetDealStatus mocks base method. -func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetDealStatus indicates an expected call of ClientGetDealStatus. -func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1) -} - -// ClientGetDealUpdates mocks base method. -func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0) - ret0, _ := ret[0].(<-chan api.DealInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates. 
-func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0) -} - -// ClientGetRetrievalUpdates mocks base method. -func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0) - ret0, _ := ret[0].(<-chan api.RetrievalInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates. -func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0) -} - -// ClientHasLocal mocks base method. -func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientHasLocal indicates an expected call of ClientHasLocal. -func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1) -} - -// ClientImport mocks base method. -func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientImport", arg0, arg1) - ret0, _ := ret[0].(*api.ImportRes) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientImport indicates an expected call of ClientImport. 
-func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1) -} - -// ClientListDataTransfers mocks base method. -func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0) - ret0, _ := ret[0].([]api.DataTransferChannel) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListDataTransfers indicates an expected call of ClientListDataTransfers. -func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0) -} - -// ClientListDeals mocks base method. -func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListDeals", arg0) - ret0, _ := ret[0].([]api.DealInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListDeals indicates an expected call of ClientListDeals. -func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0) -} - -// ClientListImports mocks base method. -func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListImports", arg0) - ret0, _ := ret[0].([]api.Import) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListImports indicates an expected call of ClientListImports. 
-func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0) -} - -// ClientListRetrievals mocks base method. -func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientListRetrievals", arg0) - ret0, _ := ret[0].([]api.RetrievalInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientListRetrievals indicates an expected call of ClientListRetrievals. -func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0) -} - -// ClientMinerQueryOffer mocks base method. -func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(api.QueryOffer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer. -func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3) -} - -// ClientQueryAsk mocks base method. 
-func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2) - ret0, _ := ret[0].(*storagemarket.StorageAsk) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientQueryAsk indicates an expected call of ClientQueryAsk. -func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2) -} - -// ClientRemoveImport mocks base method. -func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRemoveImport indicates an expected call of ClientRemoveImport. -func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1) -} - -// ClientRestartDataTransfer mocks base method. -func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer. 
-func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3) -} - -// ClientRetrieve mocks base method. -func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRetrieve indicates an expected call of ClientRetrieve. -func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) -} - -// ClientRetrieveTryRestartInsufficientFunds mocks base method. -func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds. -func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) -} - -// ClientRetrieveWithEvents mocks base method. 
-func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) - ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents. -func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) -} - -// ClientStartDeal mocks base method. -func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1) - ret0, _ := ret[0].(*cid.Cid) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientStartDeal indicates an expected call of ClientStartDeal. -func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1) -} - -// ClientStatelessDeal mocks base method. -func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1) - ret0, _ := ret[0].(*cid.Cid) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ClientStatelessDeal indicates an expected call of ClientStatelessDeal. 
-func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1) -} - // Closing mocks base method. func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { m.ctrl.T.Helper() diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go index 265674e718f..97b8ff597d2 100644 --- a/api/v0api/v1_wrapper.go +++ b/api/v0api/v1_wrapper.go @@ -4,21 +4,16 @@ import ( "context" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - marketevents "github.com/filecoin-project/lotus/markets/loggers" ) type WrapperV1Full struct { @@ -210,158 +205,10 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk) } -func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error { - events := make(chan marketevents.RetrievalEvent) - go w.clientRetrieve(ctx, order, ref, events) - - for { - select { - case evt, ok := <-events: - if !ok { // done successfully - return nil - } - - if evt.Err != "" { - return xerrors.Errorf("retrieval failed: %s", evt.Err) - } - case <-ctx.Done(): - return xerrors.Errorf("retrieval timed out") - } - } -} - -func (w *WrapperV1Full) 
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - events := make(chan marketevents.RetrievalEvent) - go w.clientRetrieve(ctx, order, ref, events) - return events, nil -} - -func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error { - for { - var subscribeEvent api.RetrievalInfo - var evt retrievalmarket.ClientEvent - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case subscribeEvent = <-subscribeEvents: - if subscribeEvent.ID != dealID { - // we can't check the deal ID ahead of time because: - // 1. We need to subscribe before retrieving. - // 2. We won't know the deal ID until after retrieving. - continue - } - if subscribeEvent.Event != nil { - evt = *subscribeEvent.Event - } - } - - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case events <- marketevents.RetrievalEvent{ - Event: evt, - Status: subscribeEvent.Status, - BytesReceived: subscribeEvent.BytesReceived, - FundsSpent: subscribeEvent.TotalPaid, - }: - } - - switch subscribeEvent.Status { - case retrievalmarket.DealStatusCompleted: - return nil - case retrievalmarket.DealStatusRejected: - return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message) - case - retrievalmarket.DealStatusDealNotFound, - retrievalmarket.DealStatusErrored: - return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message) - } - } -} - -func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) { - defer close(events) - - finish := func(e error) { - if e != nil { - events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()} - } - } - - var dealID retrievalmarket.DealID - if order.FromLocalCAR == "" { - // Subscribe to events before retrieving to avoid losing 
events. - subscribeCtx, cancel := context.WithCancel(ctx) - defer cancel() - retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx) - - if err != nil { - finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err)) - return - } - - retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{ - Root: order.Root, - Piece: order.Piece, - Size: order.Size, - Total: order.Total, - UnsealPrice: order.UnsealPrice, - PaymentInterval: order.PaymentInterval, - PaymentIntervalIncrease: order.PaymentIntervalIncrease, - Client: order.Client, - Miner: order.Miner, - MinerPeer: order.MinerPeer, - }) - - if err != nil { - finish(xerrors.Errorf("Retrieve failed: %w", err)) - return - } - - dealID = retrievalRes.DealID - - err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events) - if err != nil { - finish(xerrors.Errorf("Retrieve: %w", err)) - return - } - } - - // If ref is nil, it only fetches the data into the configured blockstore. - if ref == nil { - finish(nil) - return - } - - eref := api.ExportRef{ - Root: order.Root, - FromLocalCAR: order.FromLocalCAR, - DealID: dealID, - } - - if order.DatamodelPathSelector != nil { - s := api.Selector(*order.DatamodelPathSelector) - eref.DAGs = append(eref.DAGs, api.DagSpec{ - DataSelector: &s, - ExportMerkleProof: true, - }) - } - - finish(w.ClientExport(ctx, eref, *ref)) -} - func (w *WrapperV1Full) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) { return w.FullNode.PaychFund(ctx, from, to, amt) } -func (w *WrapperV1Full) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) { - a, err := w.FullNode.ClientQueryAsk(ctx, p, miner) - if err != nil { - return nil, err - } - return a.Response, nil -} - func (w *WrapperV1Full) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) { return w.StateGetBeaconEntry(ctx, epoch) } diff --git a/api/v1api/latest.go 
b/api/v1api/latest.go index a1e63b6ada7..aefb1543b75 100644 --- a/api/v1api/latest.go +++ b/api/v1api/latest.go @@ -12,5 +12,3 @@ type RawFullNodeAPI FullNode func PermissionedFullAPI(a FullNode) FullNode { return api.PermissionedFullAPI(a) } - -type CurioStruct = api.CurioStruct diff --git a/api/version.go b/api/version.go index 124f53dabfb..9c2113578f1 100644 --- a/api/version.go +++ b/api/version.go @@ -59,8 +59,6 @@ var ( MinerAPIVersion0 = newVer(1, 5, 0) WorkerAPIVersion0 = newVer(1, 7, 0) - - CurioAPIVersion0 = newVer(1, 0, 0) ) //nolint:varcheck,deadcode diff --git a/blockstore/splitstore/splitstore_prune.go b/blockstore/splitstore/splitstore_prune.go index 08d5b8cca12..aebc3c00114 100644 --- a/blockstore/splitstore/splitstore_prune.go +++ b/blockstore/splitstore/splitstore_prune.go @@ -47,7 +47,7 @@ var ( PruneThreshold = 7 * build.Finality ) -// GCHotstore runs online GC on the chain state in the hotstore according the to options specified +// GCHotStore runs online GC on the chain state in the hotstore according the to options specified func (s *SplitStore) GCHotStore(opts api.HotGCOpts) error { if opts.Moving { gcOpts := []bstore.BlockstoreGCOption{bstore.WithFullGC(true)} diff --git a/build/actors/v14.tar.zst b/build/actors/v14.tar.zst new file mode 100644 index 00000000000..19b023f1c84 Binary files /dev/null and b/build/actors/v14.tar.zst differ diff --git a/build/builtin_actors.go b/build/builtin_actors.go index 1cc43c3749c..af3167536a1 100644 --- a/build/builtin_actors.go +++ b/build/builtin_actors.go @@ -12,10 +12,10 @@ import ( "strconv" "strings" - "github.com/DataDog/zstd" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/ipld/go-car" + "github.com/klauspost/compress/zstd" "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" @@ -160,7 +160,10 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata, } defer fi.Close() //nolint - uncompressed := zstd.NewReader(fi) 
+ uncompressed, err := zstd.NewReader(fi) + if err != nil { + return nil, err + } defer uncompressed.Close() //nolint var bundles []*BuiltinActorsMetadata @@ -255,7 +258,10 @@ func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version, networkBundleNa } defer fi.Close() //nolint - uncompressed := zstd.NewReader(fi) + uncompressed, err := zstd.NewReader(fi) + if err != nil { + return nil, false + } defer uncompressed.Close() //nolint tarReader := tar.NewReader(uncompressed) diff --git a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go index 6d046306712..8107e1d5405 100644 --- a/build/builtin_actors_gen.go +++ b/build/builtin_actors_gen.go @@ -117,9 +117,32 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ "system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"), "verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"), }, +}, { + Network: "butterflynet", + Version: 13, + + ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacedl533kwbzouqxibejpwp6syfdekvmzy4vmmno6j4iaydbdmv4xek"), + "cron": MustParseCid("bafk2bzacecimv5xnuwyoqgxk26qt4xqpgntleret475pnh35s3vvhqtdct4ow"), + "datacap": MustParseCid("bafk2bzacebpdd4ctavhs7wkcykfahpifct3p4hbptgtf4jfrqcp2trtlygvow"), + "eam": MustParseCid("bafk2bzaceahw5rrgj7prgbnmn237di7ymjz2ssea32wr525jydpfrwpuhs67m"), + "ethaccount": MustParseCid("bafk2bzacebrslcbew5mq3le2zsn36xqxd4gt5hryeoslxnuqwgw3rhuwh6ygu"), + "evm": MustParseCid("bafk2bzaced5smz4lhpem4mbr7igcskv3e5qopbdp7dqshww2qs4ahacgzjzo4"), + "init": MustParseCid("bafk2bzacedgj6hawhdw2ot2ufisci374o2bq6bfkvlvdt6q7s3uoe5ffyv43k"), + "multisig": MustParseCid("bafk2bzacectnnnpwyqiccaymy3h6ghu74ghjrqyhtqv5odfd4opivzebjj6to"), + "paymentchannel": MustParseCid("bafk2bzaceckhx44jawhzhkz6k23gfnv2gcutgb4j4ekhonj2plwaent4b2tpk"), + "placeholder": 
MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacebbs3rlg7y3wbvxrj4wgbsqmasw4ksbbr3lyqbkaxj2t25qz6zzuy"), + "storagemarket": MustParseCid("bafk2bzaced3zmxsmlhp2nsiwkxcp2ugonbsebcd53t7htzo2jcoidvu464xmm"), + "storageminer": MustParseCid("bafk2bzacebedx7iaa2ruspxvghkg46ez7un5b7oiijjtnvddq2aot5wk7p7ry"), + "storagepower": MustParseCid("bafk2bzacebvne7m2l3hxxw4xa6oujol75x35yqpnlqiwx74jilyrop4cs7cse"), + "system": MustParseCid("bafk2bzaceacjmlxrvydlud77ilpzbscez46yedx6zjsj6olxsdeuv6d4x4cwe"), + "verifiedregistry": MustParseCid("bafk2bzacebs5muoq7ft2wgqojhjio7a4vltbyprqkmlr43ojlzbil4nwvj3jg"), + }, }, { Network: "butterflynet", - Version: 13, + Version: 14, BundleGitTag: "v13.0.0", ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"), Actors: map[string]cid.Cid{ @@ -246,9 +269,32 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ "system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"), "verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"), }, +}, { + Network: "calibrationnet", + Version: 13, + + ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6"), + "cron": MustParseCid("bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs"), + "datacap": MustParseCid("bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads"), + "eam": MustParseCid("bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg"), + "ethaccount": MustParseCid("bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu"), + "evm": MustParseCid("bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk"), + "init": MustParseCid("bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs"), + "multisig": 
MustParseCid("bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2"), + "paymentchannel": MustParseCid("bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc"), + "storagemarket": MustParseCid("bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi"), + "storageminer": MustParseCid("bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri"), + "storagepower": MustParseCid("bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu"), + "system": MustParseCid("bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6"), + "verifiedregistry": MustParseCid("bafk2bzacebj2zdquagzy2xxn7up574oemg3w7ed3fe4aujkyhgdwj57voesn2"), + }, }, { Network: "calibrationnet", - Version: 13, + Version: 14, BundleGitTag: "v13.0.0", ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"), Actors: map[string]cid.Cid{ @@ -384,9 +430,32 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ "system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"), "verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"), }, +}, { + Network: "caterpillarnet", + Version: 13, + + ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacecro3uo6ypqhfzwdhnamzcole5qmhrbkx7qny6t2qsrcpqxelt6s2"), + "cron": MustParseCid("bafk2bzaceam3kci46y4siltbw7f4itoap34kp7b7pvn2fco5s2bvnotomwdbe"), + "datacap": MustParseCid("bafk2bzacecmtdspcbqmmjtsaz4vucuqoqjqfsgxjonns7tom7eblkngbcm7bw"), + "eam": MustParseCid("bafk2bzaceaudqhrt7djewopqdnryvwxagfufyt7ja4gdvovrxbh6edh6evgrw"), + "ethaccount": MustParseCid("bafk2bzaced676ds3z6xe333wr7frwq3f2iq5kjwp4okl3te6rne3xf7kuqrwm"), + "evm": 
MustParseCid("bafk2bzacebeih4jt2s6mel6x4hje7xmnugh6twul2a5axx4iczu7fu4wcdi6k"), + "init": MustParseCid("bafk2bzaceba7vvuzzwj5wqnq2bvpbgtxup53mhr3qybezbllftnxvpqbfymxo"), + "multisig": MustParseCid("bafk2bzaceapkajhnqoczrgry5javqbl7uebgmsbpqqfemzc4yb5q2dqia2qog"), + "paymentchannel": MustParseCid("bafk2bzacebg7xq4ca22gafmdbkcq357x7v6slflib4h3fnj4amsovg6ulqg3o"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzaceajt4idf26ffnyipybcib55fykjxnek7oszkqzi7lu7mbgijmkgos"), + "storagemarket": MustParseCid("bafk2bzaceadfmay7pyl7osjsdmrireafasnjnoziacljy5ewrcsxpp56kzqbw"), + "storageminer": MustParseCid("bafk2bzaceardbn5a7aq5jxl7efr4btmsbl7txnxm4hrrd3llyhujuc2cr5vcs"), + "storagepower": MustParseCid("bafk2bzacear4563jznjqyseoy42xl6kenyqk6umv6xl3bp5bsjb3hbs6sp6bm"), + "system": MustParseCid("bafk2bzacecc5oavxivfnvirx2g7megpdf6lugooyoc2wijloju247xzjcdezy"), + "verifiedregistry": MustParseCid("bafk2bzacebnkdt42mpf5emypo6iroux3hszfh5yt54v2mmnnura3ketholly4"), + }, }, { Network: "caterpillarnet", - Version: 13, + Version: 14, BundleGitTag: "v13.0.0", ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"), Actors: map[string]cid.Cid{ @@ -513,9 +582,32 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ "system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"), "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"), }, +}, { + Network: "devnet", + Version: 13, + + ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacebev3fu5geeehpx577b3kvza4xsmmggmepjj7rlsnr27hpoq27q2i"), + "cron": MustParseCid("bafk2bzacedalzqahtuz2bmnf7uawbcujfhhe5xzv5ys5ufadu6ggs3tcu6lsy"), + "datacap": MustParseCid("bafk2bzaceb7ou2vn7ac4xidespoowq2q5w7ognr7s4ujy3xzzgiishajpe7le"), + "eam": 
MustParseCid("bafk2bzacedqic2qskattorj4svf6mbto2k76ej3ll3ugsyorqramrg7rpq3by"), + "ethaccount": MustParseCid("bafk2bzaceaoad7iknpywijigv2h3jyvkijff2oxvohzue533v5hby3iix5vdu"), + "evm": MustParseCid("bafk2bzacecjgiw26gagsn6a7tffkrgoor4zfgzfokp76u6cwervtmvjbopmwg"), + "init": MustParseCid("bafk2bzaced2obubqojxggeddr246cpwtyzi6knnq52jsvsc2fs3tuk2kh6dtg"), + "multisig": MustParseCid("bafk2bzacebquruzb6zho45orbdkku624t6w6jt4tudaqzraz4yh3li3jfstpg"), + "paymentchannel": MustParseCid("bafk2bzaceaydrilyxvflsuzr24hmw32qwz6sy4hgls73bhpveydcsqskdgpca"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzaceb74owpuzdddqoj2tson6ymbyuguqrnqefyiaxqvwm4ygitpabjrq"), + "storagemarket": MustParseCid("bafk2bzaceaw6dslv6pfqha4ynghq2imij5khnnjrie22kmfgtpie3bvxho6jq"), + "storageminer": MustParseCid("bafk2bzacecsputz6xygjfyrvx2d7bxkpp7b5v4icrmpckec7gnbabx2w377qs"), + "storagepower": MustParseCid("bafk2bzaceceyaa5yjwhxvvcqouob4l746zp5nesivr6enhtpimakdtby6kafi"), + "system": MustParseCid("bafk2bzaceaxg6k5vuozxlemfi5hv663m6jcawzu5puboo4znj73i36e3tsovs"), + "verifiedregistry": MustParseCid("bafk2bzacea2czkb4vt2iiiwdb6e57qfwqse4mk2pcyvwjmdl5ojbnla57oh2u"), + }, }, { Network: "devnet", - Version: 13, + Version: 14, BundleGitTag: "v13.0.0", ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"), Actors: map[string]cid.Cid{ @@ -665,9 +757,32 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ "system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"), "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"), }, +}, { + Network: "mainnet", + Version: 13, + + ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52"), + "cron": 
MustParseCid("bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc"), + "datacap": MustParseCid("bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci"), + "eam": MustParseCid("bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk"), + "ethaccount": MustParseCid("bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei"), + "evm": MustParseCid("bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q"), + "init": MustParseCid("bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai"), + "multisig": MustParseCid("bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc"), + "paymentchannel": MustParseCid("bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu"), + "storagemarket": MustParseCid("bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu"), + "storageminer": MustParseCid("bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2"), + "storagepower": MustParseCid("bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e"), + "system": MustParseCid("bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e"), + "verifiedregistry": MustParseCid("bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta"), + }, }, { Network: "mainnet", - Version: 13, + Version: 14, BundleGitTag: "v13.0.0", ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"), Actors: map[string]cid.Cid{ @@ -794,9 +909,32 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, +}, { + Network: "testing", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), + "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), + "datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"), + "eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"), + "ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"), + "evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"), + "init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"), + "multisig": MustParseCid("bafk2bzacebmftoql6dcyqf54xznwjg2bfgdsi67spqquwslpvvtvcx6qenhz2"), + "paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"), + "storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"), + "storageminer": MustParseCid("bafk2bzaceailclue4dba2edjethfjw6ycufcwsx4qjjmgsh77xcyprmogdjvu"), + "storagepower": MustParseCid("bafk2bzaceaqw6dhdjlqovhk3p4lb4sb25i5d6mhln2ir5m7tj6m4fegkgkinw"), + "system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), + "verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"), + }, }, { Network: "testing", - Version: 13, + Version: 14, BundleGitTag: "v13.0.0", ManifestCid: MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"), Actors: map[string]cid.Cid{ @@ -923,9 +1061,32 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{ "system": 
MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, +}, { + Network: "testing-fake-proofs", + Version: 13, + + ManifestCid: MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), + "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), + "datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"), + "eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"), + "ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"), + "evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"), + "init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"), + "multisig": MustParseCid("bafk2bzacedy4vldq4viv6bzzh4fueip3by3axsbgbh655lashddgumknc6pvs"), + "paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"), + "storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"), + "storageminer": MustParseCid("bafk2bzaceb6atn3k6yhmskgmc3lgfiwpzpfmaxzacohtnb2hivme2oroycqr6"), + "storagepower": MustParseCid("bafk2bzacedameh56mp2g4y7nprhax5sddbzcmpk5p7l523l45rtn2wjc6ah4e"), + "system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), + "verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"), + }, }, { Network: "testing-fake-proofs", - Version: 13, + Version: 14, BundleGitTag: "v13.0.0", ManifestCid: 
MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"), Actors: map[string]cid.Cid{ diff --git a/build/builtin_actors_gen_test.go b/build/builtin_actors_gen_test.go index 1338097f7b2..a2eee0c9e7a 100644 --- a/build/builtin_actors_gen_test.go +++ b/build/builtin_actors_gen_test.go @@ -15,9 +15,9 @@ import ( "strings" "testing" - "github.com/DataDog/zstd" "github.com/ipfs/go-cid" "github.com/ipld/go-car/v2" + "github.com/klauspost/compress/zstd" "github.com/stretchr/testify/require" actorstypes "github.com/filecoin-project/go-state-types/actors" @@ -46,7 +46,9 @@ func TestEmbeddedBuiltinActorsMetadata(t *testing.T) { cachedCar, err := os.Open(fmt.Sprintf("./actors/v%v.tar.zst", version)) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, cachedCar.Close()) }) - tarReader := tar.NewReader(zstd.NewReader(cachedCar)) + zstReader, err := zstd.NewReader(cachedCar) + require.NoError(t, err) + tarReader := tar.NewReader(zstReader) for { header, err := tarReader.Next() if errors.Is(err, io.EOF) { diff --git a/build/openrpc/full.json b/build/openrpc/full.json index 3fda272671c..c56ded102fd 100644 --- a/build/openrpc/full.json +++ b/build/openrpc/full.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.0" + "version": "1.27.1" }, "methods": [ { @@ -37,7 +37,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1655" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1307" } }, { @@ -60,7 +60,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1666" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1318" } }, { @@ -103,7 +103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1677" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1329" } }, { @@ -214,7 +214,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1699" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1351" } }, { @@ -454,7 +454,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1710" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1362" } }, { @@ -685,7 +685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1721" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1373" } }, { @@ -784,7 +784,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1732" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1384" } }, { @@ -816,7 +816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1743" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1395" } }, { @@ -922,7 +922,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1754" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1406" } }, { @@ -1019,7 +1019,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1765" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1417" } }, { @@ -1078,7 +1078,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1776" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1428" } }, { @@ -1171,7 +1171,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1787" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1439" } }, { @@ -1255,7 +1255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1798" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1450" } }, { @@ -1355,7 +1355,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1809" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1461" } }, { @@ -1411,7 +1411,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1820" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1472" } }, { @@ -1484,7 +1484,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1831" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1483" } }, { @@ -1557,7 +1557,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1842" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1494" } }, { @@ -1604,7 +1604,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1853" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1505" } }, { @@ -1636,7 +1636,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1864" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1516" } }, { @@ -1691,7 +1691,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1875" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1527" } }, { @@ -1743,7 +1743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1897" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1549" } }, { @@ -1780,7 +1780,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1908" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1560" } }, { @@ -1827,7 +1827,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1919" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1571" } }, { @@ -1874,7 +1874,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1930" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1582" } }, { @@ -1954,7 +1954,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1941" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1593" } }, { @@ -2006,2748 +2006,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1952" - } - }, - { - "name": "Filecoin.ClientCalcCommP", - "description": "```go\nfunc (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) {\n\tif s.Internal.ClientCalcCommP == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ClientCalcCommP(p0, p1)\n}\n```", - "summary": "ClientCalcCommP calculates the CommP for a specified file\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*CommPRet", - "description": "*CommPRet", - "summary": "", - "schema": { - "examples": [ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 1024 - } - ], - "additionalProperties": false, - "properties": { - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "Size": { - "title": "number", - "type": "number" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1963" - } - }, - { - "name": "Filecoin.ClientCancelDataTransfer", - 
"description": "```go\nfunc (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {\n\tif s.Internal.ClientCancelDataTransfer == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3)\n}\n```", - "summary": "ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "datatransfer.TransferID", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 3 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "peer.ID", - "summary": "", - "schema": { - "examples": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p3", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1974" - } - }, - { - "name": "Filecoin.ClientCancelRetrievalDeal", - "description": "```go\nfunc (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {\n\tif s.Internal.ClientCancelRetrievalDeal == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientCancelRetrievalDeal(p0, p1)\n}\n```", - "summary": "ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": 
"retrievalmarket.DealID", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 5 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1985" - } - }, - { - "name": "Filecoin.ClientDealPieceCID", - "description": "```go\nfunc (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) {\n\tif s.Internal.ClientDealPieceCID == nil {\n\t\treturn *new(DataCIDSize), ErrNotSupported\n\t}\n\treturn s.Internal.ClientDealPieceCID(p0, p1)\n}\n```", - "summary": "ClientCalcCommP calculates the CommP and data size of the specified CID\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "DataCIDSize", - "description": "DataCIDSize", - "summary": "", - "schema": { - "examples": [ - { - "PayloadSize": 9, - "PieceSize": 1032, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - ], - "additionalProperties": false, - "properties": { - "PayloadSize": { - "title": "number", - "type": "number" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2007" - } - }, - { - "name": "Filecoin.ClientDealSize", - "description": "```go\nfunc (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) {\n\tif s.Internal.ClientDealSize == nil {\n\t\treturn *new(DataSize), ErrNotSupported\n\t}\n\treturn s.Internal.ClientDealSize(p0, p1)\n}\n```", - "summary": "ClientDealSize calculates real deal data size\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "DataSize", - "description": "DataSize", - "summary": "", - "schema": { - "examples": [ - { - "PayloadSize": 9, - "PieceSize": 1032 - } - ], - "additionalProperties": false, - "properties": { - "PayloadSize": { - "title": "number", - "type": "number" - }, - "PieceSize": { - "title": "number", - "type": "number" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2018" - } - }, - { - "name": "Filecoin.ClientExport", - "description": "```go\nfunc (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error {\n\tif s.Internal.ClientExport == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientExport(p0, p1, p2)\n}\n```", - "summary": "ClientExport exports a file stored in the local filestore to a system file\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "ExportRef", - "summary": "", - "schema": { - "examples": [ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DAGs": [ - { - "DataSelector": "Links/21/Hash/Links/42/Hash", - "ExportMerkleProof": true - } - ], - "FromLocalCAR": "string value", - "DealID": 5 - } - ], - "additionalProperties": false, - "properties": { - "DAGs": { - "items": { - "additionalProperties": false, - "properties": { - "DataSelector": { - "type": "string" - }, - "ExportMerkleProof": { - "type": "boolean" - } - }, - "type": "object" - }, - "type": "array" - }, - "DealID": { - "title": "number", - "type": "number" - }, 
- "FromLocalCAR": { - "type": "string" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "FileRef", - "summary": "", - "schema": { - "examples": [ - { - "Path": "string value", - "IsCAR": true - } - ], - "additionalProperties": false, - "properties": { - "IsCAR": { - "type": "boolean" - }, - "Path": { - "type": "string" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2029" - } - }, - { - "name": "Filecoin.ClientFindData", - "description": "```go\nfunc (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) {\n\tif s.Internal.ClientFindData == nil {\n\t\treturn *new([]QueryOffer), ErrNotSupported\n\t}\n\treturn s.Internal.ClientFindData(p0, p1, p2)\n}\n```", - "summary": "ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "*cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "[]QueryOffer", - "description": "[]QueryOffer", - "summary": "", - "schema": { - "examples": [ - [ - { - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "MinPrice": "0", - "UnsealPrice": "0", - "PricePerByte": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "Err": { - "type": "string" - }, - "MinPrice": { - "additionalProperties": false, - "type": "object" - }, - "Miner": { - "additionalProperties": false, - "type": "object" - }, - "MinerPeer": { - "additionalProperties": false, - "properties": { - "Address": { - "additionalProperties": false, - "type": "object" - }, - "ID": { - "type": "string" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - } - }, - "type": "object" - }, - 
"PaymentInterval": { - "title": "number", - "type": "number" - }, - "PaymentIntervalIncrease": { - "title": "number", - "type": "number" - }, - "Piece": { - "title": "Content Identifier", - "type": "string" - }, - "PricePerByte": { - "additionalProperties": false, - "type": "object" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "Size": { - "title": "number", - "type": "number" - }, - "UnsealPrice": { - "additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2040" - } - }, - { - "name": "Filecoin.ClientGenCar", - "description": "```go\nfunc (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error {\n\tif s.Internal.ClientGenCar == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientGenCar(p0, p1, p2)\n}\n```", - "summary": "ClientGenCar generates a CAR file for the specified file.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "FileRef", - "summary": "", - "schema": { - "examples": [ - { - "Path": "string value", - "IsCAR": true - } - ], - "additionalProperties": false, - "properties": { - "IsCAR": { - "type": "boolean" - }, - "Path": { - "type": "string" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2051" - } - }, - { - "name": "Filecoin.ClientGetDealInfo", - "description": "```go\nfunc (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) {\n\tif s.Internal.ClientGetDealInfo == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ClientGetDealInfo(p0, p1)\n}\n```", - "summary": "ClientGetDealInfo returns the latest information about a given deal.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*DealInfo", - "description": "*DealInfo", - "summary": "", - "schema": { - "examples": [ - { - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - 
"PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } - } - ], - "additionalProperties": false, - "properties": { - "CreationTime": { - "format": "date-time", - "type": "string" - }, - "DataRef": { - "additionalProperties": false, - "properties": { - "PieceCid": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "RawBlockSize": { - "title": "number", - "type": "number" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "TransferType": { - "type": "string" - } - }, - "type": "object" - }, - "DataTransfer": { - "additionalProperties": false, - "properties": { - "BaseCID": { - "title": "Content Identifier", - "type": "string" - }, - "IsInitiator": { - "type": "boolean" - }, - "IsSender": { - "type": "boolean" - }, - "Message": { - "type": "string" - }, - "OtherPeer": { - "type": "string" - }, - "Stages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": 
{ - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TransferID": { - "title": "number", - "type": "number" - }, - "Transferred": { - "title": "number", - "type": "number" - }, - "Voucher": { - "type": "string" - } - }, - "type": "object" - }, - "DealID": { - "title": "number", - "type": "number" - }, - "DealStages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "ExpectedDuration": { - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Duration": { - "title": "number", - "type": "number" - }, - "Message": { - "type": "string" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PricePerEpoch": { - "additionalProperties": false, - "type": "object" - }, - "ProposalCid": { - "title": "Content Identifier", - "type": "string" - }, - "Provider": { - "additionalProperties": false, - "type": "object" - }, - "Size": { - "title": "number", - "type": "number" - }, - "State": { - "title": 
"number", - "type": "number" - }, - "TransferChannelID": { - "additionalProperties": false, - "properties": { - "ID": { - "title": "number", - "type": "number" - }, - "Initiator": { - "type": "string" - }, - "Responder": { - "type": "string" - } - }, - "type": "object" - }, - "Verified": { - "type": "boolean" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2062" - } - }, - { - "name": "Filecoin.ClientGetDealStatus", - "description": "```go\nfunc (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {\n\tif s.Internal.ClientGetDealStatus == nil {\n\t\treturn \"\", ErrNotSupported\n\t}\n\treturn s.Internal.ClientGetDealStatus(p0, p1)\n}\n```", - "summary": "ClientGetDealStatus returns status given a code\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "uint64", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 42 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "string", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2073" - } - }, - { - "name": "Filecoin.ClientHasLocal", - "description": "```go\nfunc (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {\n\tif s.Internal.ClientHasLocal == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.ClientHasLocal(p0, p1)\n}\n```", - "summary": "ClientHasLocal indicates whether a certain CID is locally 
stored.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "bool", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2106" - } - }, - { - "name": "Filecoin.ClientImport", - "description": "```go\nfunc (s *FullNodeStruct) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) {\n\tif s.Internal.ClientImport == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ClientImport(p0, p1)\n}\n```", - "summary": "ClientImport imports file under the specified path into filestore.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "FileRef", - "summary": "", - "schema": { - "examples": [ - { - "Path": "string value", - "IsCAR": true - } - ], - "additionalProperties": false, - "properties": { - "IsCAR": { - "type": "boolean" - }, - "Path": { - "type": "string" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*ImportRes", - "description": "*ImportRes", - "summary": "", - "schema": { - "examples": [ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ImportID": 50 - } - ], - "additionalProperties": false, - "properties": { - "ImportID": 
{ - "title": "number", - "type": "number" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2117" - } - }, - { - "name": "Filecoin.ClientListDataTransfers", - "description": "```go\nfunc (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {\n\tif s.Internal.ClientListDataTransfers == nil {\n\t\treturn *new([]DataTransferChannel), ErrNotSupported\n\t}\n\treturn s.Internal.ClientListDataTransfers(p0)\n}\n```", - "summary": "ClientListTransfers returns the status of all ongoing transfers of data\n", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]DataTransferChannel", - "description": "[]DataTransferChannel", - "summary": "", - "schema": { - "examples": [ - [ - { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "BaseCID": { - "title": "Content Identifier", - "type": "string" - }, - "IsInitiator": { - "type": "boolean" - }, - "IsSender": { - "type": "boolean" - }, - "Message": { - "type": "string" - }, - "OtherPeer": { - "type": "string" - }, - "Stages": { - "additionalProperties": false, - "properties": { - 
"Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TransferID": { - "title": "number", - "type": "number" - }, - "Transferred": { - "title": "number", - "type": "number" - }, - "Voucher": { - "type": "string" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2128" - } - }, - { - "name": "Filecoin.ClientListDeals", - "description": "```go\nfunc (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]DealInfo, error) {\n\tif s.Internal.ClientListDeals == nil {\n\t\treturn *new([]DealInfo), ErrNotSupported\n\t}\n\treturn s.Internal.ClientListDeals(p0)\n}\n```", - "summary": "ClientListDeals returns information about the deals made by the local client.\n", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]DealInfo", - "description": "[]DealInfo", - "summary": "", - "schema": { - "examples": [ - [ - { - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - 
"CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "CreationTime": { - "format": "date-time", - "type": "string" - }, - "DataRef": { - "additionalProperties": false, - "properties": { - "PieceCid": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "RawBlockSize": { - "title": "number", - "type": "number" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - 
"TransferType": { - "type": "string" - } - }, - "type": "object" - }, - "DataTransfer": { - "additionalProperties": false, - "properties": { - "BaseCID": { - "title": "Content Identifier", - "type": "string" - }, - "IsInitiator": { - "type": "boolean" - }, - "IsSender": { - "type": "boolean" - }, - "Message": { - "type": "string" - }, - "OtherPeer": { - "type": "string" - }, - "Stages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TransferID": { - "title": "number", - "type": "number" - }, - "Transferred": { - "title": "number", - "type": "number" - }, - "Voucher": { - "type": "string" - } - }, - "type": "object" - }, - "DealID": { - "title": "number", - "type": "number" - }, - "DealStages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "ExpectedDuration": { - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - 
"additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Duration": { - "title": "number", - "type": "number" - }, - "Message": { - "type": "string" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PricePerEpoch": { - "additionalProperties": false, - "type": "object" - }, - "ProposalCid": { - "title": "Content Identifier", - "type": "string" - }, - "Provider": { - "additionalProperties": false, - "type": "object" - }, - "Size": { - "title": "number", - "type": "number" - }, - "State": { - "title": "number", - "type": "number" - }, - "TransferChannelID": { - "additionalProperties": false, - "properties": { - "ID": { - "title": "number", - "type": "number" - }, - "Initiator": { - "type": "string" - }, - "Responder": { - "type": "string" - } - }, - "type": "object" - }, - "Verified": { - "type": "boolean" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2139" - } - }, - { - "name": "Filecoin.ClientListImports", - "description": "```go\nfunc (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]Import, error) {\n\tif s.Internal.ClientListImports == nil {\n\t\treturn *new([]Import), ErrNotSupported\n\t}\n\treturn s.Internal.ClientListImports(p0)\n}\n```", - "summary": "ClientListImports lists imported files and their root CIDs\n", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]Import", - "description": "[]Import", - "summary": "", - "schema": { - "examples": [ - [ - { - "Key": 50, - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Source": "string value", - "FilePath": "string value", - "CARPath": "string value" - } - ] - ], 
- "items": [ - { - "additionalProperties": false, - "properties": { - "CARPath": { - "type": "string" - }, - "Err": { - "type": "string" - }, - "FilePath": { - "type": "string" - }, - "Key": { - "title": "number", - "type": "number" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "Source": { - "type": "string" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2150" - } - }, - { - "name": "Filecoin.ClientListRetrievals", - "description": "```go\nfunc (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) {\n\tif s.Internal.ClientListRetrievals == nil {\n\t\treturn *new([]RetrievalInfo), ErrNotSupported\n\t}\n\treturn s.Internal.ClientListRetrievals(p0)\n}\n```", - "summary": "ClientListRetrievals returns information about retrievals made by the local client\n", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]RetrievalInfo", - "description": "[]RetrievalInfo", - "summary": "", - "schema": { - "examples": [ - [ - { - "PayloadCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ID": 5, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PricePerByte": "0", - "UnsealPrice": "0", - "Status": 0, - "Message": "string value", - "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "BytesReceived": 42, - "BytesPaidFor": 42, - "TotalPaid": "0", - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - 
}, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Event": 5 - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "BytesPaidFor": { - "title": "number", - "type": "number" - }, - "BytesReceived": { - "title": "number", - "type": "number" - }, - "DataTransfer": { - "additionalProperties": false, - "properties": { - "BaseCID": { - "title": "Content Identifier", - "type": "string" - }, - "IsInitiator": { - "type": "boolean" - }, - "IsSender": { - "type": "boolean" - }, - "Message": { - "type": "string" - }, - "OtherPeer": { - "type": "string" - }, - "Stages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TransferID": { - "title": "number", - "type": "number" - }, - "Transferred": { - "title": "number", - "type": "number" - }, - "Voucher": { - "type": "string" - } - }, - "type": "object" - }, - "Event": { - "title": "number", - "type": 
"number" - }, - "ID": { - "title": "number", - "type": "number" - }, - "Message": { - "type": "string" - }, - "PayloadCID": { - "title": "Content Identifier", - "type": "string" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PricePerByte": { - "additionalProperties": false, - "type": "object" - }, - "Provider": { - "type": "string" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TotalPaid": { - "additionalProperties": false, - "type": "object" - }, - "TransferChannelID": { - "additionalProperties": false, - "properties": { - "ID": { - "title": "number", - "type": "number" - }, - "Initiator": { - "type": "string" - }, - "Responder": { - "type": "string" - } - }, - "type": "object" - }, - "UnsealPrice": { - "additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2161" - } - }, - { - "name": "Filecoin.ClientMinerQueryOffer", - "description": "```go\nfunc (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) {\n\tif s.Internal.ClientMinerQueryOffer == nil {\n\t\treturn *new(QueryOffer), ErrNotSupported\n\t}\n\treturn s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)\n}\n```", - "summary": "ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "address.Address", - "summary": "", - "schema": { - "examples": [ - "f01234" - ], - "additionalProperties": false, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": 
"Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p3", - "description": "*cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "QueryOffer", - "description": "QueryOffer", - "summary": "", - "schema": { - "examples": [ - { - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "MinPrice": "0", - "UnsealPrice": "0", - "PricePerByte": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - } - ], - "additionalProperties": false, - "properties": { - "Err": { - "type": "string" - }, - "MinPrice": { - "additionalProperties": false, - "type": "object" - }, - "Miner": { - "additionalProperties": false, - "type": "object" - }, - "MinerPeer": { - "additionalProperties": false, - "properties": { - "Address": { - "additionalProperties": false, - "type": "object" - }, - "ID": { - "type": "string" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - } - 
}, - "type": "object" - }, - "PaymentInterval": { - "title": "number", - "type": "number" - }, - "PaymentIntervalIncrease": { - "title": "number", - "type": "number" - }, - "Piece": { - "title": "Content Identifier", - "type": "string" - }, - "PricePerByte": { - "additionalProperties": false, - "type": "object" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "Size": { - "title": "number", - "type": "number" - }, - "UnsealPrice": { - "additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2172" - } - }, - { - "name": "Filecoin.ClientQueryAsk", - "description": "```go\nfunc (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) {\n\tif s.Internal.ClientQueryAsk == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ClientQueryAsk(p0, p1, p2)\n}\n```", - "summary": "ClientQueryAsk returns a signed StorageAsk from the specified miner.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "peer.ID", - "summary": "", - "schema": { - "examples": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "address.Address", - "summary": "", - "schema": { - "examples": [ - "f01234" - ], - "additionalProperties": false, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*StorageAsk", - "description": "*StorageAsk", - "summary": "", - "schema": { - "examples": [ - { - "Response": { - "Price": "0", - "VerifiedPrice": "0", - "MinPieceSize": 1032, - "MaxPieceSize": 1032, - "Miner": "f01234", - "Timestamp": 10101, - "Expiry": 10101, - 
"SeqNo": 42 - }, - "DealProtocols": [ - "string value" - ] - } - ], - "additionalProperties": false, - "properties": { - "DealProtocols": { - "items": { - "type": "string" - }, - "type": "array" - }, - "Response": {} - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2183" - } - }, - { - "name": "Filecoin.ClientRemoveImport", - "description": "```go\nfunc (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {\n\tif s.Internal.ClientRemoveImport == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientRemoveImport(p0, p1)\n}\n```", - "summary": "ClientRemoveImport removes file import\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "imports.ID", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 50 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2194" - } - }, - { - "name": "Filecoin.ClientRestartDataTransfer", - "description": "```go\nfunc (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {\n\tif s.Internal.ClientRestartDataTransfer == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3)\n}\n```", - "summary": "ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer\n", - "paramStructure": "by-position", - "params": [ - { - 
"name": "p1", - "description": "datatransfer.TransferID", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 3 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "peer.ID", - "summary": "", - "schema": { - "examples": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p3", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2205" - } - }, - { - "name": "Filecoin.ClientRetrieve", - "description": "```go\nfunc (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) {\n\tif s.Internal.ClientRetrieve == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ClientRetrieve(p0, p1)\n}\n```", - "summary": "ClientRetrieve initiates the retrieval of a file, as specified in the order.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "RetrievalOrder", - "summary": "", - "schema": { - "examples": [ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DataSelector": "Links/21/Hash/Links/42/Hash", - "Size": 42, - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "f01234", - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": 
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - }, - "RemoteStore": "00000000-0000-0000-0000-000000000000" - } - ], - "additionalProperties": false, - "properties": { - "Client": { - "additionalProperties": false, - "type": "object" - }, - "DataSelector": { - "type": "string" - }, - "Miner": { - "additionalProperties": false, - "type": "object" - }, - "MinerPeer": { - "additionalProperties": false, - "properties": { - "Address": { - "additionalProperties": false, - "type": "object" - }, - "ID": { - "type": "string" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - } - }, - "type": "object" - }, - "PaymentInterval": { - "title": "number", - "type": "number" - }, - "PaymentIntervalIncrease": { - "title": "number", - "type": "number" - }, - "Piece": { - "title": "Content Identifier", - "type": "string" - }, - "RemoteStore": { - "items": { - "description": "Number is a number", - "title": "number", - "type": "number" - }, - "maxItems": 16, - "minItems": 16, - "type": "array" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "Size": { - "title": "number", - "type": "number" - }, - "Total": { - "additionalProperties": false, - "type": "object" - }, - "UnsealPrice": { - "additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*RestrievalRes", - "description": "*RestrievalRes", - "summary": "", - "schema": { - "examples": [ - { - "DealID": 5 - } - ], - "additionalProperties": false, - "properties": { - "DealID": { - "title": "number", - "type": "number" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2216" - } - }, 
- { - "name": "Filecoin.ClientRetrieveTryRestartInsufficientFunds", - "description": "```go\nfunc (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {\n\tif s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1)\n}\n```", - "summary": "ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel\nwhich are stuck due to insufficient funds\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "address.Address", - "summary": "", - "schema": { - "examples": [ - "f01234" - ], - "additionalProperties": false, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2227" - } - }, - { - "name": "Filecoin.ClientRetrieveWait", - "description": "```go\nfunc (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error {\n\tif s.Internal.ClientRetrieveWait == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.ClientRetrieveWait(p0, p1)\n}\n```", - "summary": "ClientRetrieveWait waits for retrieval to be complete\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "retrievalmarket.DealID", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 5 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": 
false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2238" - } - }, - { - "name": "Filecoin.ClientStartDeal", - "description": "```go\nfunc (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {\n\tif s.Internal.ClientStartDeal == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ClientStartDeal(p0, p1)\n}\n```", - "summary": "ClientStartDeal proposes a deal with a miner.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "*StartDealParams", - "summary": "", - "schema": { - "examples": [ - { - "Data": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "Wallet": "f01234", - "Miner": "f01234", - "EpochPrice": "0", - "MinBlocksDuration": 42, - "ProviderCollateral": "0", - "DealStartEpoch": 10101, - "FastRetrieval": true, - "VerifiedDeal": true - } - ], - "additionalProperties": false, - "properties": { - "Data": { - "additionalProperties": false, - "properties": { - "PieceCid": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "RawBlockSize": { - "title": "number", - "type": "number" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "TransferType": { - "type": "string" - } - }, - "type": "object" - }, - "DealStartEpoch": { - "title": "number", - "type": "number" - }, - "EpochPrice": { - "additionalProperties": false, - "type": "object" - }, - "FastRetrieval": { - "type": "boolean" - }, - "MinBlocksDuration": { - "title": "number", - "type": "number" - }, - "Miner": { - "additionalProperties": false, - "type": "object" - }, - 
"ProviderCollateral": { - "additionalProperties": false, - "type": "object" - }, - "VerifiedDeal": { - "type": "boolean" - }, - "Wallet": { - "additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*cid.Cid", - "description": "*cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2249" - } - }, - { - "name": "Filecoin.ClientStatelessDeal", - "description": "```go\nfunc (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {\n\tif s.Internal.ClientStatelessDeal == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ClientStatelessDeal(p0, p1)\n}\n```", - "summary": "ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "*StartDealParams", - "summary": "", - "schema": { - "examples": [ - { - "Data": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "Wallet": "f01234", - "Miner": "f01234", - "EpochPrice": "0", - "MinBlocksDuration": 42, - "ProviderCollateral": "0", - "DealStartEpoch": 10101, - "FastRetrieval": 
true, - "VerifiedDeal": true - } - ], - "additionalProperties": false, - "properties": { - "Data": { - "additionalProperties": false, - "properties": { - "PieceCid": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "RawBlockSize": { - "title": "number", - "type": "number" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "TransferType": { - "type": "string" - } - }, - "type": "object" - }, - "DealStartEpoch": { - "title": "number", - "type": "number" - }, - "EpochPrice": { - "additionalProperties": false, - "type": "object" - }, - "FastRetrieval": { - "type": "boolean" - }, - "MinBlocksDuration": { - "title": "number", - "type": "number" - }, - "Miner": { - "additionalProperties": false, - "type": "object" - }, - "ProviderCollateral": { - "additionalProperties": false, - "type": "object" - }, - "VerifiedDeal": { - "type": "boolean" - }, - "Wallet": { - "additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*cid.Cid", - "description": "*cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2260" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1604" } }, { @@ -4786,7 +2045,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2271" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1615" } }, { @@ -4833,7 +2092,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2282" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1626" } }, { @@ -4888,7 +2147,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2293" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1637" } }, { @@ -4917,7 +2176,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2304" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1648" } }, { @@ -5054,7 +2313,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2315" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1659" } }, { @@ -5083,7 +2342,7 @@ "deprecated": false, 
"externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2326" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1670" } }, { @@ -5137,7 +2396,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2337" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1681" } }, { @@ -5228,7 +2487,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2348" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1692" } }, { @@ -5256,7 +2515,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2359" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1703" } }, { @@ -5346,7 +2605,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2370" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1714" } }, { @@ -5602,7 +2861,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2381" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1725" } }, { @@ -5847,7 +3106,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2392" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1736" } }, { @@ -5903,7 +3162,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2403" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1747" } }, { @@ -5950,7 +3209,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2414" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1758" } }, { @@ -6048,7 +3307,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2425" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1769" } }, { @@ -6114,7 +3373,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2436" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1780" } }, { @@ -6180,7 +3439,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2447" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1791" } }, { @@ -6289,7 +3548,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2458" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1802" } }, { @@ -6347,7 +3606,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2469" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1813" } }, { @@ -6469,7 +3728,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2480" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1824" } }, { @@ -6543,6 +3802,7 @@ "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -6598,6 +3858,10 @@ "title": "number", "type": "number" }, + "gasPrice": { + "additionalProperties": false, + "type": "object" + }, "hash": { "items": { "description": "Number is a number", @@ -6673,7 +3937,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2491" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1835" } }, { @@ -6738,6 +4002,7 @@ "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -6793,6 +4058,10 @@ "title": "number", "type": "number" }, + "gasPrice": { + "additionalProperties": false, + "type": "object" + }, "hash": { "items": { "description": "Number is a number", @@ -6868,7 +4137,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2502" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1846" } }, { @@ -6925,6 +4194,7 @@ "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -6980,6 +4250,10 @@ "title": "number", "type": "number" }, + "gasPrice": { + "additionalProperties": false, + "type": "object" + }, "hash": { "items": { "description": "Number is a number", @@ -7055,7 +4329,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2513" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1857" } }, { @@ -7129,6 +4403,7 @@ "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -7184,6 +4459,10 @@ "title": "number", "type": "number" }, + "gasPrice": { + "additionalProperties": false, + "type": "object" + }, "hash": { "items": { "description": "Number is a number", @@ -7259,7 +4538,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2524" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1868" } }, { @@ -7350,7 +4629,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2535" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1879" } }, { @@ -7408,7 +4687,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2546" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1890" } }, { @@ -7666,7 +4945,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2557" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1901" } }, { @@ -7941,7 +5220,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2568" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1912" } }, { @@ 
-7969,7 +5248,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2579" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1923" } }, { @@ -8007,7 +5286,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2590" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1934" } }, { @@ -8115,7 +5394,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2601" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1945" } }, { @@ -8153,7 +5432,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2612" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1956" } }, { @@ -8182,7 +5461,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2623" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1967" } }, { @@ -8245,7 +5524,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2634" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1978" } }, { @@ -8308,7 +5587,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2645" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1989" } }, { @@ -8353,7 +5632,7 @@ "deprecated": 
false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2656" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2000" } }, { @@ -8475,7 +5754,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2667" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2011" } }, { @@ -8630,7 +5909,129 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2678" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2022" + } + }, + { + "name": "Filecoin.EthTraceTransaction", + "description": "```go\nfunc (s *FullNodeStruct) EthTraceTransaction(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) {\n\tif s.Internal.EthTraceTransaction == nil {\n\t\treturn *new([]*ethtypes.EthTraceTransaction), ErrNotSupported\n\t}\n\treturn s.Internal.EthTraceTransaction(p0, p1)\n}\n```", + "summary": "Implmements OpenEthereum-compatible API method trace_transaction\n", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "string", + "summary": "", + "schema": { + "examples": [ + "string value" + ], + "type": [ + "string" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "[]*ethtypes.EthTraceTransaction", + "description": "[]*ethtypes.EthTraceTransaction", + "summary": "", + "schema": { + "examples": [ + [ + { + "type": "string value", + "error": "string value", + "subtraces": 123, + "traceAddress": [ + 123 + ], + "action": {}, + "result": {}, + "blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", + "blockNumber": 9, + "transactionHash": 
"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", + "transactionPosition": 123 + } + ] + ], + "items": [ + { + "additionalProperties": false, + "properties": { + "action": { + "additionalProperties": true, + "type": "object" + }, + "blockHash": { + "items": { + "description": "Number is a number", + "title": "number", + "type": "number" + }, + "maxItems": 32, + "minItems": 32, + "type": "array" + }, + "blockNumber": { + "title": "number", + "type": "number" + }, + "error": { + "type": "string" + }, + "result": { + "additionalProperties": true, + "type": "object" + }, + "subtraces": { + "title": "number", + "type": "number" + }, + "traceAddress": { + "items": { + "description": "Number is a number", + "title": "number", + "type": "number" + }, + "type": "array" + }, + "transactionHash": { + "items": { + "description": "Number is a number", + "title": "number", + "type": "number" + }, + "maxItems": 32, + "minItems": 32, + "type": "array" + }, + "transactionPosition": { + "title": "number", + "type": "number" + }, + "type": { + "type": "string" + } + }, + "type": [ + "object" + ] + } + ], + "type": [ + "array" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2033" } }, { @@ -8684,7 +6085,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2689" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2044" } }, { @@ -8738,7 +6139,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2700" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2055" } }, { @@ -8793,7 +6194,7 @@ "deprecated": false, "externalDocs": 
{ "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2711" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2066" } }, { @@ -8936,7 +6337,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2722" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2077" } }, { @@ -9063,7 +6464,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2733" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2088" } }, { @@ -9165,7 +6566,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2744" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2099" } }, { @@ -9388,7 +6789,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2755" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2110" } }, { @@ -9571,7 +6972,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2766" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2121" } }, { @@ -9651,7 +7052,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2777" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2132" } }, { @@ -9696,7 +7097,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2788" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2143" } }, { @@ -9752,7 +7153,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2799" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2154" } }, { @@ -9832,7 +7233,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2810" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2165" } }, { @@ -9912,7 +7313,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2821" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2176" } }, { @@ -10397,7 +7798,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2832" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2187" } }, { @@ -10591,7 +7992,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2843" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2198" } }, { @@ -10746,7 +8147,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2854" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2209" } }, { @@ -10995,7 +8396,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2865" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2220" } }, { @@ -11150,7 +8551,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2876" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2231" } }, { @@ -11327,7 +8728,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2887" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2242" } }, { @@ -11425,7 +8826,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2898" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2253" } }, { @@ -11590,7 +8991,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2909" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2264" } }, { @@ -11629,7 +9030,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2920" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2275" } }, { @@ -11694,7 +9095,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2931" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2286" } }, { @@ -11740,7 +9141,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2942" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2297" } }, { @@ -11890,7 +9291,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2953" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2308" } }, { @@ -12027,7 +9428,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2964" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2319" } }, { @@ -12258,7 +9659,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2975" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2330" } }, { @@ -12395,7 +9796,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2986" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2341" } }, { @@ -12560,7 +9961,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2997" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2352" } }, { @@ -12637,7 +10038,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3008" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2363" } }, { @@ -12832,7 +10233,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3030" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2385" } }, { @@ -13011,7 +10412,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3041" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2396" } }, { @@ -13173,7 +10574,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3052" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2407" } }, { @@ -13321,7 +10722,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3063" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2418" } }, { @@ -13549,7 +10950,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3074" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2429" } }, { @@ -13697,7 +11098,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3085" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2440" } }, { @@ -13909,7 +11310,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3096" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2451" } }, { @@ -14115,7 +11516,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3107" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2462" } }, { @@ -14183,7 +11584,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3118" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2473" } }, { @@ -14300,7 +11701,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3129" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2484" } }, { @@ -14391,7 +11792,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3140" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2495" } }, { @@ -14477,7 +11878,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3151" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2506" } }, { @@ -14672,7 +12073,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3162" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2517" } }, { @@ -14834,7 +12235,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3173" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2528" } }, { @@ -15030,7 +12431,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3184" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2539" } }, { @@ -15210,7 +12611,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3195" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2550" } }, { @@ -15373,7 +12774,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3206" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2561" } }, { @@ -15400,7 +12801,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3217" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2572" } }, { @@ -15427,7 +12828,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3228" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2583" } }, { @@ -15526,7 +12927,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3239" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2594" } }, { @@ -15572,7 +12973,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3250" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2605" } }, { @@ -15672,7 +13073,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3261" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2616" } }, { @@ -15788,7 +13189,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3272" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2627" } }, { @@ -15836,7 +13237,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3283" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2638" } }, { @@ -15928,7 +13329,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3294" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2649" } }, { @@ -16043,7 +13444,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3305" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2660" } }, { @@ -16091,7 +13492,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3316" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2671" } }, { @@ -16128,7 +13529,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3327" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2682" } }, { @@ -16400,7 +13801,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3338" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2693" } }, { @@ -16448,7 +13849,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3349" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2704" } }, { @@ -16506,7 +13907,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3360" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2715" } }, { @@ -16711,7 +14112,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3371" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2726" } }, { @@ -16914,7 +14315,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3382" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2737" } }, { @@ -17083,7 +14484,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3393" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2748" } }, { @@ -17287,7 +14688,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3404" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2759" } }, { @@ -17454,7 +14855,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3415" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2770" } }, { @@ -17661,7 +15062,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3426" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2781" } }, { @@ -17729,7 +15130,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3437" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2792" } }, { @@ -17746,7 +15147,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 22 + 23 ], "type": [ "number" @@ -17781,7 +15182,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3448" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2803" } }, { @@ -17798,7 +15199,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 22 + 23 ], "type": [ "number" @@ -17830,7 +15231,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3459" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2814" } }, { @@ -17921,7 +15322,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3470" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2825" } }, { @@ -18427,7 +15828,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3481" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2836" } }, { @@ -18533,7 +15934,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3492" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2847" } }, { @@ -18585,7 +15986,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3503" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2858" } }, { @@ -19137,7 +16538,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3514" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2869" } }, { @@ -19251,7 +16652,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3525" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2880" } }, { @@ -19348,7 +16749,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3536" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2891" } }, { @@ -19448,7 +16849,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3547" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2902" } }, { @@ -19536,7 +16937,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3558" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2913" } }, { @@ -19636,7 +17037,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3569" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2924" } }, { @@ -19723,7 +17124,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3580" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2935" } }, { @@ -19814,7 +17215,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3591" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2946" } }, { @@ -19939,7 +17340,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3602" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2957" } }, { @@ -20048,7 +17449,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3613" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2968" } }, { @@ -20118,7 +17519,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3624" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2979" } }, { @@ -20221,7 +17622,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3635" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2990" } }, { @@ -20282,7 +17683,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3646" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3001" } }, { @@ -20412,7 +17813,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3657" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3012" } }, { @@ -20519,7 +17920,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3668" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3023" } }, { @@ -20569,7 +17970,8 @@ "UpgradeThunderHeight": 10101, "UpgradeWatermelonHeight": 10101, "UpgradeDragonHeight": 10101, - "UpgradePhoenixHeight": 10101 + "UpgradePhoenixHeight": 10101, + "UpgradeAussieHeight": 10101 }, "Eip155ChainID": 123 } @@ -20599,6 +18001,10 @@ "title": "number", "type": "number" }, + "UpgradeAussieHeight": { + "title": "number", + "type": "number" + }, "UpgradeBreezeHeight": { "title": "number", "type": "number" @@ -20728,7 +18134,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3679" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3034" } }, { @@ -20805,7 +18211,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3690" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3045" } }, { @@ -20882,7 +18288,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3701" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3056" } }, { @@ -20991,7 +18397,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3712" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3067" } }, { @@ -21100,7 +18506,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3723" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3078" } }, { @@ -21161,7 +18567,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3734" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3089" } }, { @@ -21271,7 +18677,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3745" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3100" } }, { @@ -21332,7 +18738,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3756" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3111" } }, { @@ -21400,7 +18806,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3767" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3122" } }, { @@ -21468,7 +18874,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3778" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3133" } }, { @@ -21549,7 +18955,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3789" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3144" } }, { @@ -21606,6 +19012,7 @@ "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -21673,6 +19080,10 @@ "title": "number", "type": "number" }, + "SectorNumber": { + "title": "number", + "type": "number" + }, "SectorStartEpoch": { "title": "number", "type": "number" @@ -21698,7 +19109,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3800" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3155" } }, { @@ -21770,7 +19181,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3811" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3166" } }, { @@ -21843,6 +19254,7 @@ "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -21907,6 +19319,10 @@ "title": "number", "type": "number" }, + "SectorNumber": { + "title": "number", + "type": "number" + }, "SectorStartEpoch": { "title": "number", "type": "number" @@ -21929,7 +19345,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3822" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3177" } }, { @@ -22094,7 +19510,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3833" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3188" } }, { @@ -22164,7 +19580,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3844" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3199" } }, { @@ -22232,7 +19648,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3855" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3210" } }, { @@ -22325,7 +19741,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3866" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3221" } }, { @@ -22396,7 +19812,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3877" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3232" } }, { @@ -22597,7 +20013,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3888" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3243" } }, { @@ -22729,7 +20145,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3899" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3254" } }, { @@ -22866,7 +20282,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3910" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3265" } }, { @@ -22977,7 +20393,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3921" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3276" } }, { @@ -23109,7 +20525,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3932" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3287" } }, { @@ -23240,7 +20656,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3943" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3298" } }, { @@ -23311,7 +20727,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3954" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3309" } }, { @@ -23395,7 +20811,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3965" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3320" } }, { @@ -23481,7 +20897,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3976" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3331" } }, { @@ -23664,7 +21080,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3987" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3342" } }, { @@ -23691,7 +21107,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3998" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3353" } }, { @@ -23732,7 +21148,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 22 + 23 ], "type": [ "number" @@ -23744,7 +21160,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4009" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3364" } }, { @@ -23832,7 +21248,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4020" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3375" } }, { @@ -24283,7 +21699,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4031" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3386" } }, { @@ -24450,7 +21866,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4042" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3397" } }, { 
@@ -24548,7 +21964,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4053" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3408" } }, { @@ -24721,7 +22137,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4064" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3419" } }, { @@ -24819,7 +22235,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4075" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3430" } }, { @@ -24970,7 +22386,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4086" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3441" } }, { @@ -25055,7 +22471,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4097" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3452" } }, { @@ -25123,7 +22539,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4108" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3463" } }, { @@ -25175,7 +22591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4119" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3474" } }, { @@ -25243,7 
+22659,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4130" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3485" } }, { @@ -25404,7 +22820,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4141" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3496" } }, { @@ -25451,7 +22867,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4163" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3518" } }, { @@ -25498,7 +22914,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4174" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3529" } }, { @@ -25541,7 +22957,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4196" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3551" } }, { @@ -25637,7 +23053,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4207" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3562" } }, { @@ -25903,7 +23319,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4218" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3573" } }, { @@ -25926,7 +23342,7 @@ 
"deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4229" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3584" } }, { @@ -25969,7 +23385,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4240" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3595" } }, { @@ -26020,7 +23436,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4251" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3606" } }, { @@ -26065,7 +23481,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4262" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3617" } }, { @@ -26093,7 +23509,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4273" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3628" } }, { @@ -26133,7 +23549,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4284" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3639" } }, { @@ -26192,7 +23608,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4295" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3650" } }, { @@ -26236,7 +23652,7 @@ "deprecated": 
false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4306" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3661" } }, { @@ -26295,7 +23711,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4317" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3672" } }, { @@ -26332,7 +23748,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4328" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3683" } }, { @@ -26376,7 +23792,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4339" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3694" } }, { @@ -26416,7 +23832,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4350" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3705" } }, { @@ -26491,7 +23907,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4361" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3716" } }, { @@ -26699,7 +24115,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4372" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3727" } }, { @@ -26743,7 +24159,7 @@ "deprecated": false, 
"externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4383" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3738" } }, { @@ -26833,7 +24249,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4394" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3749" } }, { @@ -26860,7 +24276,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4405" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3760" } } ] diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz deleted file mode 100644 index 08dda7784c6..00000000000 Binary files a/build/openrpc/full.json.gz and /dev/null differ diff --git a/build/openrpc/gateway.json b/build/openrpc/gateway.json index 3538f25cf5a..cc702401b35 100644 --- a/build/openrpc/gateway.json +++ b/build/openrpc/gateway.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.0" + "version": "1.27.1" }, "methods": [ { @@ -242,7 +242,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4416" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3771" } }, { @@ -473,7 +473,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4427" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3782" } }, { @@ -572,7 +572,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4438" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3793" } }, { @@ -604,7 +604,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4449" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3804" } }, { @@ -710,7 +710,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4460" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3815" } }, { @@ -803,7 +803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4471" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3826" } }, { @@ -887,7 +887,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4482" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3837" } }, { @@ -987,7 +987,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4493" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3848" } }, { @@ -1043,7 +1043,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4504" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3859" } }, { @@ -1116,7 +1116,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4515" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3870" } }, { @@ -1189,7 +1189,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4526" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3881" } }, { @@ -1236,7 +1236,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4537" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3892" } }, { @@ -1268,7 +1268,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4548" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3903" } }, { @@ -1305,7 +1305,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4570" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3925" } }, { @@ -1352,7 +1352,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4581" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3936" } }, { @@ -1392,7 +1392,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4592" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3947" } }, { @@ -1439,7 +1439,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4603" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3958" } }, { @@ -1494,7 +1494,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4614" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3969" } }, { @@ -1523,7 +1523,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4625" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3980" } }, { @@ -1660,7 +1660,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4636" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3991" } }, { @@ -1689,7 +1689,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4647" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4002" } }, { @@ -1743,7 +1743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4658" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4013" } }, { @@ -1834,7 +1834,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4669" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4024" } }, { @@ -1862,7 +1862,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4680" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4035" } }, { @@ -1952,7 +1952,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4691" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4046" } }, { @@ -2208,7 +2208,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4702" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4057" } }, { @@ -2453,7 +2453,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4713" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4068" } }, { @@ -2509,7 +2509,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4724" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4079" } }, { @@ -2556,7 +2556,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4735" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4090" } }, { @@ -2654,7 +2654,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4746" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4101" } }, { @@ -2720,7 +2720,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4757" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4112" } }, { @@ -2786,7 +2786,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4768" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4123" } }, { @@ -2895,7 +2895,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4779" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4134" } }, { @@ -2953,7 +2953,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4790" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4145" } }, { @@ -3075,7 +3075,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4801" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4156" } }, { @@ -3132,6 +3132,7 @@ "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -3187,6 +3188,10 @@ "title": "number", "type": "number" }, + "gasPrice": { + "additionalProperties": false, + "type": "object" + }, "hash": { "items": { "description": "Number is a number", @@ -3262,7 +3267,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4812" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4167" } }, { @@ -3336,6 +3341,7 @@ "gas": 
"0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -3391,6 +3397,10 @@ "title": "number", "type": "number" }, + "gasPrice": { + "additionalProperties": false, + "type": "object" + }, "hash": { "items": { "description": "Number is a number", @@ -3466,7 +3476,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4823" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4178" } }, { @@ -3557,7 +3567,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4834" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4189" } }, { @@ -3615,7 +3625,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4845" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4200" } }, { @@ -3873,7 +3883,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4856" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4211" } }, { @@ -4148,7 +4158,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4867" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4222" } }, { @@ -4176,7 +4186,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4878" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4233" } }, { @@ -4214,7 +4224,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4889" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4244" } }, { @@ -4322,7 +4332,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4900" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4255" } }, { @@ -4360,7 +4370,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4911" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4266" } }, { @@ -4389,7 +4399,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4922" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4277" } }, { @@ -4452,7 +4462,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4933" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4288" } }, { @@ -4515,7 +4525,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4944" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4299" } }, { @@ -4560,7 +4570,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4955" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4310" } }, { @@ -4682,7 +4692,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4966" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4321" } }, { @@ -4837,7 +4847,129 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4977" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4332" + } + }, + { + "name": "Filecoin.EthTraceTransaction", + "description": "```go\nfunc (s *GatewayStruct) EthTraceTransaction(p0 context.Context, p1 string) ([]*ethtypes.EthTraceTransaction, error) {\n\tif s.Internal.EthTraceTransaction == nil {\n\t\treturn *new([]*ethtypes.EthTraceTransaction), ErrNotSupported\n\t}\n\treturn s.Internal.EthTraceTransaction(p0, p1)\n}\n```", + "summary": "There are not yet any comments for this method.", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "string", + "summary": "", + "schema": { + "examples": [ + "string value" + ], + "type": [ + "string" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "[]*ethtypes.EthTraceTransaction", + "description": "[]*ethtypes.EthTraceTransaction", + "summary": "", + "schema": { + "examples": [ + [ + { + "type": "string value", + "error": "string value", + "subtraces": 123, + "traceAddress": [ + 123 + ], + "action": {}, + "result": {}, + "blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", + "blockNumber": 9, + "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", + "transactionPosition": 123 + } + ] + ], + "items": [ + { + "additionalProperties": false, + "properties": { + "action": { + "additionalProperties": true, + "type": 
"object" + }, + "blockHash": { + "items": { + "description": "Number is a number", + "title": "number", + "type": "number" + }, + "maxItems": 32, + "minItems": 32, + "type": "array" + }, + "blockNumber": { + "title": "number", + "type": "number" + }, + "error": { + "type": "string" + }, + "result": { + "additionalProperties": true, + "type": "object" + }, + "subtraces": { + "title": "number", + "type": "number" + }, + "traceAddress": { + "items": { + "description": "Number is a number", + "title": "number", + "type": "number" + }, + "type": "array" + }, + "transactionHash": { + "items": { + "description": "Number is a number", + "title": "number", + "type": "number" + }, + "maxItems": 32, + "minItems": 32, + "type": "array" + }, + "transactionPosition": { + "title": "number", + "type": "number" + }, + "type": { + "type": "string" + } + }, + "type": [ + "object" + ] + } + ], + "type": [ + "array" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4343" } }, { @@ -4891,7 +5023,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4988" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4354" } }, { @@ -4945,7 +5077,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4999" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4365" } }, { @@ -5000,7 +5132,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5010" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4376" } }, { @@ -5102,7 
+5234,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5021" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4387" } }, { @@ -5325,7 +5457,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5032" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4398" } }, { @@ -5508,7 +5640,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5043" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4409" } }, { @@ -5702,7 +5834,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5054" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4420" } }, { @@ -5748,7 +5880,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5065" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4431" } }, { @@ -5898,7 +6030,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5076" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4442" } }, { @@ -6035,7 +6167,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5087" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4453" } }, { @@ -6103,7 +6235,7 @@ "deprecated": false, 
"externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5098" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4464" } }, { @@ -6220,7 +6352,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5109" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4475" } }, { @@ -6311,7 +6443,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5120" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4486" } }, { @@ -6397,7 +6529,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5131" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4497" } }, { @@ -6424,7 +6556,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5142" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4508" } }, { @@ -6451,7 +6583,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5153" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4519" } }, { @@ -6519,7 +6651,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5164" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4530" } }, { @@ -7025,7 +7157,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5175" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4541" } }, { @@ -7122,7 +7254,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5186" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4552" } }, { @@ -7222,7 +7354,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5197" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4563" } }, { @@ -7322,7 +7454,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5208" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4574" } }, { @@ -7447,7 +7579,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5219" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4585" } }, { @@ -7556,7 +7688,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5230" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4596" } }, { @@ -7659,7 +7791,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5241" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4607" } }, { @@ -7789,7 +7921,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5252" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4618" } }, { @@ -7896,7 +8028,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5263" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4629" } }, { @@ -7957,7 +8089,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5274" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4640" } }, { @@ -8025,7 +8157,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5285" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4651" } }, { @@ -8106,7 +8238,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5296" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4662" } }, { @@ -8179,6 +8311,7 @@ "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -8243,6 +8376,10 @@ "title": "number", "type": "number" }, + "SectorNumber": { + "title": "number", + "type": "number" + }, "SectorStartEpoch": { "title": "number", "type": "number" @@ -8265,7 +8402,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5307" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4673" } }, { @@ -8358,7 +8495,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5318" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4684" } }, { @@ -8559,7 +8696,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5329" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4695" } }, { @@ -8670,7 +8807,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5340" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4706" } }, { @@ -8801,7 +8938,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5351" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4717" } }, { @@ -8887,7 +9024,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5362" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4728" } }, { @@ -8914,7 +9051,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5373" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4739" } }, { @@ -8955,7 +9092,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 22 + 23 ], "type": [ "number" @@ -8967,7 +9104,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5384" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4750" } }, { @@ -9055,7 +9192,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5395" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4761" } }, { @@ -9506,7 +9643,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5406" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4772" } }, { @@ -9673,7 +9810,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5417" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4783" } }, { @@ -9846,7 +9983,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5428" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4794" } }, { @@ -9914,7 +10051,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5439" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4805" } }, { @@ -9982,7 +10119,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5450" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4816" } }, { @@ -10143,7 +10280,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5461" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4827" } }, { @@ -10188,7 +10325,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5483" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4849" } }, { @@ -10233,7 +10370,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5494" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4860" } }, { @@ -10260,7 +10397,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5505" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4871" } } ] diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz deleted file mode 100644 index 9f5dc38ee68..00000000000 Binary files a/build/openrpc/gateway.json.gz and /dev/null differ diff --git a/build/openrpc/miner.json b/build/openrpc/miner.json index d9f2f514548..c5390cbe035 100644 --- a/build/openrpc/miner.json +++ b/build/openrpc/miner.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.0" + "version": "1.27.1" }, "methods": [ { @@ -30,7 +30,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5791" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5157" } }, { @@ -109,7 +109,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5802" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5168" } }, { @@ -155,7 
+155,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5813" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5179" } }, { @@ -203,7 +203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5824" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5190" } }, { @@ -251,7 +251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5835" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5201" } }, { @@ -354,7 +354,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5846" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5212" } }, { @@ -428,7 +428,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5857" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5223" } }, { @@ -538,7 +538,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 22 + 23 ], "type": [ "number" @@ -591,7 +591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5868" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5234" } }, { @@ -742,7 +742,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5879" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5245" } }, { @@ -781,117 +781,44 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5890" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5256" } }, { - "name": "Filecoin.DagstoreGC", - "description": "```go\nfunc (s *StorageMinerStruct) DagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) {\n\tif s.Internal.DagstoreGC == nil {\n\t\treturn *new([]DagstoreShardResult), ErrNotSupported\n\t}\n\treturn s.Internal.DagstoreGC(p0)\n}\n```", - "summary": "DagstoreGC runs garbage collection on the DAG store.\n", + "name": "Filecoin.MarketListDeals", + "description": "```go\nfunc (s *StorageMinerStruct) MarketListDeals(p0 context.Context) ([]*MarketDeal, error) {\n\tif s.Internal.MarketListDeals == nil {\n\t\treturn *new([]*MarketDeal), ErrNotSupported\n\t}\n\treturn s.Internal.MarketListDeals(p0)\n}\n```", + "summary": "", "paramStructure": "by-position", "params": [], "result": { - "name": "[]DagstoreShardResult", - "description": "[]DagstoreShardResult", + "name": "[]*MarketDeal", + "description": "[]*MarketDeal", "summary": "", "schema": { "examples": [ [ { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "Success": false, - "Error": "\u003cerror\u003e" - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "Error": { - "type": "string" - }, - "Key": { - "type": "string" + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" }, - "Success": { - "type": "boolean" + "State": { + "SectorNumber": 9, + 
"SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5901" - } - }, - { - "name": "Filecoin.DagstoreInitializeShard", - "description": "```go\nfunc (s *StorageMinerStruct) DagstoreInitializeShard(p0 context.Context, p1 string) error {\n\tif s.Internal.DagstoreInitializeShard == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DagstoreInitializeShard(p0, p1)\n}\n```", - "summary": "DagstoreInitializeShard initializes an uninitialized shard.\n\nInitialization consists of fetching the shard's data (deal payload) from\nthe storage subsystem, generating an index, and persisting the index\nto facilitate later retrievals, and/or to publish to external sources.\n\nThis operation is intended to complement the initial migration. The\nmigration registers a shard for every unique piece CID, with lazy\ninitialization. Thus, shards are not initialized immediately to avoid\nIO activity competing with proving. Instead, shard are initialized\nwhen first accessed. This method forces the initialization of a shard by\naccessing it and immediately releasing it. 
This is useful to warm up the\ncache to facilitate subsequent retrievals, and to generate the indexes\nto publish them externally.\n\nThis operation fails if the shard is not in ShardStateNew state.\nIt blocks until initialization finishes.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5923" - } - }, - { - "name": "Filecoin.DagstoreListShards", - "description": "```go\nfunc (s *StorageMinerStruct) DagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) {\n\tif s.Internal.DagstoreListShards == nil {\n\t\treturn *new([]DagstoreShardInfo), ErrNotSupported\n\t}\n\treturn s.Internal.DagstoreListShards(p0)\n}\n```", - "summary": "DagstoreListShards returns information about all shards known to the\nDAG store. 
Only available on nodes running the markets subsystem.\n", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]DagstoreShardInfo", - "description": "[]DagstoreShardInfo", - "summary": "", - "schema": { - "examples": [ - [ - { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "State": "ShardStateAvailable", - "Error": "\u003cerror\u003e" } ] ], @@ -899,14 +826,76 @@ { "additionalProperties": false, "properties": { - "Error": { - "type": "string" - }, - "Key": { - "type": "string" + "Proposal": { + "additionalProperties": false, + "properties": { + "Client": { + "additionalProperties": false, + "type": "object" + }, + "ClientCollateral": { + "additionalProperties": false, + "type": "object" + }, + "EndEpoch": { + "title": "number", + "type": "number" + }, + "Label": { + "additionalProperties": false, + "type": "object" + }, + "PieceCID": { + "title": "Content Identifier", + "type": "string" + }, + "PieceSize": { + "title": "number", + "type": "number" + }, + "Provider": { + "additionalProperties": false, + "type": "object" + }, + "ProviderCollateral": { + "additionalProperties": false, + "type": "object" + }, + "StartEpoch": { + "title": "number", + "type": "number" + }, + "StoragePricePerEpoch": { + "additionalProperties": false, + "type": "object" + }, + "VerifiedDeal": { + "type": "boolean" + } + }, + "type": "object" }, "State": { - "type": "string" + "additionalProperties": false, + "properties": { + "LastUpdatedEpoch": { + "title": "number", + "type": "number" + }, + "SectorNumber": { + "title": "number", + "type": "number" + }, + "SectorStartEpoch": { + "title": "number", + "type": "number" + }, + "SlashEpoch": { + "title": "number", + "type": "number" + } + }, + "type": "object" } }, "type": [ @@ -924,2772 +913,28 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5934" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5267" } }, { - "name": "Filecoin.DagstoreLookupPieces", - "description": "```go\nfunc (s *StorageMinerStruct) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) {\n\tif s.Internal.DagstoreLookupPieces == nil {\n\t\treturn *new([]DagstoreShardInfo), ErrNotSupported\n\t}\n\treturn s.Internal.DagstoreLookupPieces(p0, p1)\n}\n```", - "summary": "DagstoreLookupPieces returns information about shards that contain the given CID.\n", + "name": "Filecoin.MiningBase", + "description": "```go\nfunc (s *StorageMinerStruct) MiningBase(p0 context.Context) (*types.TipSet, error) {\n\tif s.Internal.MiningBase == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.MiningBase(p0)\n}\n```", + "summary": "", "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], + "params": [], "result": { - "name": "[]DagstoreShardInfo", - "description": "[]DagstoreShardInfo", + "name": "*types.TipSet", + "description": "*types.TipSet", "summary": "", "schema": { "examples": [ - [ - { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "State": "ShardStateAvailable", - "Error": "\u003cerror\u003e" - } - ] - ], - "items": [ { - "additionalProperties": false, - "properties": { - "Error": { - "type": "string" - }, - "Key": { - "type": "string" - }, - "State": { - "type": "string" - } - }, - "type": [ - "object" - ] + "Cids": null, + "Blocks": null, + "Height": 0 } ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5945" - } - }, - { - "name": "Filecoin.DagstoreRecoverShard", - "description": "```go\nfunc (s *StorageMinerStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error {\n\tif s.Internal.DagstoreRecoverShard == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DagstoreRecoverShard(p0, p1)\n}\n```", - "summary": "DagstoreRecoverShard attempts to recover a failed shard.\n\nThis operation fails if the shard is not in ShardStateErrored state.\nIt blocks until recovery finishes. 
If recovery failed, it returns the\nerror.\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5956" - } - }, - { - "name": "Filecoin.DagstoreRegisterShard", - "description": "```go\nfunc (s *StorageMinerStruct) DagstoreRegisterShard(p0 context.Context, p1 string) error {\n\tif s.Internal.DagstoreRegisterShard == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DagstoreRegisterShard(p0, p1)\n}\n```", - "summary": "DagstoreRegisterShard registers a shard manually with dagstore with given pieceCID\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5967" - } - }, - { - "name": "Filecoin.DealsConsiderOfflineRetrievalDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) {\n\tif s.Internal.DealsConsiderOfflineRetrievalDeals == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.DealsConsiderOfflineRetrievalDeals(p0)\n}\n```", - "summary": "", - "paramStructure": 
"by-position", - "params": [], - "result": { - "name": "bool", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5978" - } - }, - { - "name": "Filecoin.DealsConsiderOfflineStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) {\n\tif s.Internal.DealsConsiderOfflineStorageDeals == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.DealsConsiderOfflineStorageDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "bool", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5989" - } - }, - { - "name": "Filecoin.DealsConsiderOnlineRetrievalDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) {\n\tif s.Internal.DealsConsiderOnlineRetrievalDeals == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.DealsConsiderOnlineRetrievalDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "bool", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6000" - } - }, - { - 
"name": "Filecoin.DealsConsiderOnlineStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) {\n\tif s.Internal.DealsConsiderOnlineStorageDeals == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.DealsConsiderOnlineStorageDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "bool", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6011" - } - }, - { - "name": "Filecoin.DealsConsiderUnverifiedStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) {\n\tif s.Internal.DealsConsiderUnverifiedStorageDeals == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.DealsConsiderUnverifiedStorageDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "bool", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6022" - } - }, - { - "name": "Filecoin.DealsConsiderVerifiedStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) {\n\tif s.Internal.DealsConsiderVerifiedStorageDeals == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.DealsConsiderVerifiedStorageDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - 
"params": [], - "result": { - "name": "bool", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6033" - } - }, - { - "name": "Filecoin.DealsImportData", - "description": "```go\nfunc (s *StorageMinerStruct) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error {\n\tif s.Internal.DealsImportData == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsImportData(p0, p1, p2)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6044" - } - }, - { - "name": "Filecoin.DealsList", - "description": "```go\nfunc (s *StorageMinerStruct) DealsList(p0 context.Context) ([]*MarketDeal, error) {\n\tif s.Internal.DealsList == nil {\n\t\treturn *new([]*MarketDeal), 
ErrNotSupported\n\t}\n\treturn s.Internal.DealsList(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]*MarketDeal", - "description": "[]*MarketDeal", - "summary": "", - "schema": { - "examples": [ - [ - { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "State": { - "SectorStartEpoch": 10101, - "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101 - } - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "Proposal": { - "additionalProperties": false, - "properties": { - "Client": { - "additionalProperties": false, - "type": "object" - }, - "ClientCollateral": { - "additionalProperties": false, - "type": "object" - }, - "EndEpoch": { - "title": "number", - "type": "number" - }, - "Label": { - "additionalProperties": false, - "type": "object" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "Provider": { - "additionalProperties": false, - "type": "object" - }, - "ProviderCollateral": { - "additionalProperties": false, - "type": "object" - }, - "StartEpoch": { - "title": "number", - "type": "number" - }, - "StoragePricePerEpoch": { - "additionalProperties": false, - "type": "object" - }, - "VerifiedDeal": { - "type": "boolean" - } - }, - "type": "object" - }, - "State": { - "additionalProperties": false, - "properties": { - "LastUpdatedEpoch": { - "title": "number", - "type": "number" - }, - "SectorStartEpoch": { - "title": "number", - "type": "number" - }, - "SlashEpoch": { - "title": "number", - "type": "number" - } - }, - "type": "object" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ 
- "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6055" - } - }, - { - "name": "Filecoin.DealsPieceCidBlocklist", - "description": "```go\nfunc (s *StorageMinerStruct) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) {\n\tif s.Internal.DealsPieceCidBlocklist == nil {\n\t\treturn *new([]cid.Cid), ErrNotSupported\n\t}\n\treturn s.Internal.DealsPieceCidBlocklist(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]cid.Cid", - "description": "[]cid.Cid", - "summary": "", - "schema": { - "examples": [ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ] - ], - "items": [ - { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "type": [ - "string" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6066" - } - }, - { - "name": "Filecoin.DealsSetConsiderOfflineRetrievalDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error {\n\tif s.Internal.DealsSetConsiderOfflineRetrievalDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - 
"result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6077" - } - }, - { - "name": "Filecoin.DealsSetConsiderOfflineStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error {\n\tif s.Internal.DealsSetConsiderOfflineStorageDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6088" - } - }, - { - "name": "Filecoin.DealsSetConsiderOnlineRetrievalDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error {\n\tif s.Internal.DealsSetConsiderOnlineRetrievalDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": 
true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6099" - } - }, - { - "name": "Filecoin.DealsSetConsiderOnlineStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error {\n\tif s.Internal.DealsSetConsiderOnlineStorageDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6110" - } - }, - { - "name": "Filecoin.DealsSetConsiderUnverifiedStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error {\n\tif s.Internal.DealsSetConsiderUnverifiedStorageDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6121" - } - }, - { - "name": "Filecoin.DealsSetConsiderVerifiedStorageDeals", - "description": "```go\nfunc (s *StorageMinerStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error {\n\tif s.Internal.DealsSetConsiderVerifiedStorageDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6132" - } - }, - { - "name": "Filecoin.DealsSetPieceCidBlocklist", - "description": "```go\nfunc (s *StorageMinerStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error {\n\tif s.Internal.DealsSetPieceCidBlocklist == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.DealsSetPieceCidBlocklist(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "[]cid.Cid", - "summary": "", - "schema": { - "examples": [ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ] - ], - "items": [ - { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "type": [ - "string" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6143" - } - }, - { - "name": "Filecoin.IndexerAnnounceAllDeals", - "description": "```go\nfunc (s *StorageMinerStruct) IndexerAnnounceAllDeals(p0 context.Context) error {\n\tif s.Internal.IndexerAnnounceAllDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.IndexerAnnounceAllDeals(p0)\n}\n```", - "summary": "IndexerAnnounceAllDeals informs the indexer nodes aboutall active deals.\n", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6154" - } - }, - { - "name": "Filecoin.IndexerAnnounceDeal", - "description": "```go\nfunc (s *StorageMinerStruct) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error {\n\tif s.Internal.IndexerAnnounceDeal == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.IndexerAnnounceDeal(p0, p1)\n}\n```", - "summary": "IndexerAnnounceDeal informs indexer nodes that a new deal was received,\nso they can download its index\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6165" - } - }, - { - "name": "Filecoin.MarketCancelDataTransfer", - "description": "```go\nfunc (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {\n\tif s.Internal.MarketCancelDataTransfer == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3)\n}\n```", - "summary": "MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "datatransfer.TransferID", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 3 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "peer.ID", - "summary": "", - "schema": { - "examples": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p3", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - 
"externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6176" - } - }, - { - "name": "Filecoin.MarketDataTransferDiagnostics", - "description": "```go\nfunc (s *StorageMinerStruct) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) {\n\tif s.Internal.MarketDataTransferDiagnostics == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.MarketDataTransferDiagnostics(p0, p1)\n}\n```", - "summary": "MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "peer.ID", - "summary": "", - "schema": { - "examples": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*TransferDiagnostics", - "description": "*TransferDiagnostics", - "summary": "", - "schema": { - "examples": [ - { - "ReceivingTransfers": [ - { - "RequestID": {}, - "RequestState": "string value", - "IsCurrentChannelRequest": true, - "ChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "ChannelState": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - 
"Diagnostics": [ - "string value" - ] - } - ], - "SendingTransfers": [ - { - "RequestID": {}, - "RequestState": "string value", - "IsCurrentChannelRequest": true, - "ChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "ChannelState": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Diagnostics": [ - "string value" - ] - } - ] - } - ], - "additionalProperties": false, - "properties": { - "ReceivingTransfers": { - "items": { - "additionalProperties": false, - "properties": { - "ChannelID": { - "additionalProperties": false, - "properties": { - "ID": { - "title": "number", - "type": "number" - }, - "Initiator": { - "type": "string" - }, - "Responder": { - "type": "string" - } - }, - "type": "object" - }, - "ChannelState": { - "additionalProperties": false, - "properties": { - "BaseCID": { - "title": "Content Identifier", - "type": "string" - }, - "IsInitiator": { - "type": "boolean" - }, - "IsSender": { - "type": "boolean" - }, - "Message": { - "type": "string" - }, - "OtherPeer": { - "type": "string" - }, - "Stages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "Logs": { - "items": { - 
"additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TransferID": { - "title": "number", - "type": "number" - }, - "Transferred": { - "title": "number", - "type": "number" - }, - "Voucher": { - "type": "string" - } - }, - "type": "object" - }, - "Diagnostics": { - "items": { - "type": "string" - }, - "type": "array" - }, - "IsCurrentChannelRequest": { - "type": "boolean" - }, - "RequestID": { - "additionalProperties": false, - "type": "object" - }, - "RequestState": { - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "SendingTransfers": { - "items": { - "additionalProperties": false, - "properties": { - "ChannelID": { - "additionalProperties": false, - "properties": { - "ID": { - "title": "number", - "type": "number" - }, - "Initiator": { - "type": "string" - }, - "Responder": { - "type": "string" - } - }, - "type": "object" - }, - "ChannelState": { - "additionalProperties": false, - "properties": { - "BaseCID": { - "title": "Content Identifier", - "type": "string" - }, - "IsInitiator": { - "type": "boolean" - }, - "IsSender": { - "type": "boolean" - }, - "Message": { - "type": "string" - }, - "OtherPeer": { - "type": "string" - }, - "Stages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - 
"additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TransferID": { - "title": "number", - "type": "number" - }, - "Transferred": { - "title": "number", - "type": "number" - }, - "Voucher": { - "type": "string" - } - }, - "type": "object" - }, - "Diagnostics": { - "items": { - "type": "string" - }, - "type": "array" - }, - "IsCurrentChannelRequest": { - "type": "boolean" - }, - "RequestID": { - "additionalProperties": false, - "type": "object" - }, - "RequestState": { - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6187" - } - }, - { - "name": "Filecoin.MarketGetAsk", - "description": "```go\nfunc (s *StorageMinerStruct) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) {\n\tif s.Internal.MarketGetAsk == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.MarketGetAsk(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "*storagemarket.SignedStorageAsk", - "description": "*storagemarket.SignedStorageAsk", - "summary": "", - "schema": { - "examples": [ - { - "Ask": { - "Price": "0", - "VerifiedPrice": "0", - "MinPieceSize": 1032, - "MaxPieceSize": 1032, - "Miner": "f01234", - "Timestamp": 10101, - "Expiry": 10101, - "SeqNo": 42 - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - } - ], - "additionalProperties": false, - "properties": { - "Ask": { - "additionalProperties": false, - 
"properties": { - "Expiry": { - "title": "number", - "type": "number" - }, - "MaxPieceSize": { - "title": "number", - "type": "number" - }, - "MinPieceSize": { - "title": "number", - "type": "number" - }, - "Miner": { - "additionalProperties": false, - "type": "object" - }, - "Price": { - "additionalProperties": false, - "type": "object" - }, - "SeqNo": { - "title": "number", - "type": "number" - }, - "Timestamp": { - "title": "number", - "type": "number" - }, - "VerifiedPrice": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "Signature": { - "additionalProperties": false, - "properties": { - "Data": { - "media": { - "binaryEncoding": "base64" - }, - "type": "string" - }, - "Type": { - "title": "number", - "type": "number" - } - }, - "type": "object" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6209" - } - }, - { - "name": "Filecoin.MarketGetRetrievalAsk", - "description": "```go\nfunc (s *StorageMinerStruct) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) {\n\tif s.Internal.MarketGetRetrievalAsk == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.MarketGetRetrievalAsk(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "*retrievalmarket.Ask", - "description": "*retrievalmarket.Ask", - "summary": "", - "schema": { - "examples": [ - { - "PricePerByte": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42 - } - ], - "additionalProperties": false, - "properties": { - "PaymentInterval": { - "title": "number", - "type": "number" - }, - "PaymentIntervalIncrease": { - "title": "number", - "type": "number" - }, - "PricePerByte": { - "additionalProperties": false, - "type": "object" - }, - "UnsealPrice": { - 
"additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6231" - } - }, - { - "name": "Filecoin.MarketImportDealData", - "description": "```go\nfunc (s *StorageMinerStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error {\n\tif s.Internal.MarketImportDealData == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.MarketImportDealData(p0, p1, p2)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "string", - "summary": "", - "schema": { - "examples": [ - "string value" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6242" - } - }, - { - "name": "Filecoin.MarketListDataTransfers", - "description": "```go\nfunc (s *StorageMinerStruct) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {\n\tif s.Internal.MarketListDataTransfers == nil {\n\t\treturn *new([]DataTransferChannel), 
ErrNotSupported\n\t}\n\treturn s.Internal.MarketListDataTransfers(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]DataTransferChannel", - "description": "[]DataTransferChannel", - "summary": "", - "schema": { - "examples": [ - [ - { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "BaseCID": { - "title": "Content Identifier", - "type": "string" - }, - "IsInitiator": { - "type": "boolean" - }, - "IsSender": { - "type": "boolean" - }, - "Message": { - "type": "string" - }, - "OtherPeer": { - "type": "string" - }, - "Stages": { - "additionalProperties": false, - "properties": { - "Stages": { - "items": { - "additionalProperties": false, - "properties": { - "CreatedTime": { - "additionalProperties": false, - "type": "object" - }, - "Description": { - "type": "string" - }, - "Logs": { - "items": { - "additionalProperties": false, - "properties": { - "Log": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "Name": { - "type": "string" - }, - "UpdatedTime": { - "additionalProperties": false, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Status": { - "title": "number", - "type": "number" - }, - "TransferID": { - "title": "number", - 
"type": "number" - }, - "Transferred": { - "title": "number", - "type": "number" - }, - "Voucher": { - "type": "string" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6253" - } - }, - { - "name": "Filecoin.MarketListDeals", - "description": "```go\nfunc (s *StorageMinerStruct) MarketListDeals(p0 context.Context) ([]*MarketDeal, error) {\n\tif s.Internal.MarketListDeals == nil {\n\t\treturn *new([]*MarketDeal), ErrNotSupported\n\t}\n\treturn s.Internal.MarketListDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]*MarketDeal", - "description": "[]*MarketDeal", - "summary": "", - "schema": { - "examples": [ - [ - { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "State": { - "SectorStartEpoch": 10101, - "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101 - } - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "Proposal": { - "additionalProperties": false, - "properties": { - "Client": { - "additionalProperties": false, - "type": "object" - }, - "ClientCollateral": { - "additionalProperties": false, - "type": "object" - }, - "EndEpoch": { - "title": "number", - "type": "number" - }, - "Label": { - "additionalProperties": false, - "type": "object" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "Provider": { - "additionalProperties": false, - "type": 
"object" - }, - "ProviderCollateral": { - "additionalProperties": false, - "type": "object" - }, - "StartEpoch": { - "title": "number", - "type": "number" - }, - "StoragePricePerEpoch": { - "additionalProperties": false, - "type": "object" - }, - "VerifiedDeal": { - "type": "boolean" - } - }, - "type": "object" - }, - "State": { - "additionalProperties": false, - "properties": { - "LastUpdatedEpoch": { - "title": "number", - "type": "number" - }, - "SectorStartEpoch": { - "title": "number", - "type": "number" - }, - "SlashEpoch": { - "title": "number", - "type": "number" - } - }, - "type": "object" - } - }, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6264" - } - }, - { - "name": "Filecoin.MarketListIncompleteDeals", - "description": "```go\nfunc (s *StorageMinerStruct) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) {\n\tif s.Internal.MarketListIncompleteDeals == nil {\n\t\treturn *new([]storagemarket.MinerDeal), ErrNotSupported\n\t}\n\treturn s.Internal.MarketListIncompleteDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]storagemarket.MinerDeal", - "description": "[]storagemarket.MinerDeal", - "summary": "", - "schema": { - "examples": [ - [ - { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ProposalCid": { - "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "AddFundsCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PublishCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "State": 42, - "PiecePath": ".lotusminer/fstmp123", - "MetadataPath": ".lotusminer/fstmp123", - "SlashEpoch": 10101, - "FastRetrieval": true, - "Message": "string value", - "FundsReserved": "0", - "Ref": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "AvailableForRetrieval": true, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "TransferChannelId": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "SectorNumber": 9, - "InboundCAR": "string value" - } - ] - ], - "items": [ - { - "additionalProperties": false, - "properties": { - "AddFundsCid": { - "title": "Content Identifier", - "type": "string" - }, - "AvailableForRetrieval": { - "type": "boolean" - }, - "Client": { - "type": "string" - }, - "ClientSignature": { - "additionalProperties": false, - "properties": { - "Data": { - "media": { - "binaryEncoding": "base64" - }, - "type": "string" - }, - "Type": { - "title": "number", - "type": "number" - } - }, - "type": "object" - }, - "CreationTime": { - "additionalProperties": false, - "type": "object" - }, - "DealID": { - "title": "number", - "type": "number" - }, - "FastRetrieval": { - "type": "boolean" - }, - "FundsReserved": { - "additionalProperties": false, - "type": "object" - }, - "InboundCAR": { - "type": "string" - }, - "Message": 
{ - "type": "string" - }, - "MetadataPath": { - "type": "string" - }, - "Miner": { - "type": "string" - }, - "PiecePath": { - "type": "string" - }, - "Proposal": { - "additionalProperties": false, - "properties": { - "Client": { - "additionalProperties": false, - "type": "object" - }, - "ClientCollateral": { - "additionalProperties": false, - "type": "object" - }, - "EndEpoch": { - "title": "number", - "type": "number" - }, - "Label": { - "additionalProperties": false, - "type": "object" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "Provider": { - "additionalProperties": false, - "type": "object" - }, - "ProviderCollateral": { - "additionalProperties": false, - "type": "object" - }, - "StartEpoch": { - "title": "number", - "type": "number" - }, - "StoragePricePerEpoch": { - "additionalProperties": false, - "type": "object" - }, - "VerifiedDeal": { - "type": "boolean" - } - }, - "type": "object" - }, - "ProposalCid": { - "title": "Content Identifier", - "type": "string" - }, - "PublishCid": { - "title": "Content Identifier", - "type": "string" - }, - "Ref": { - "additionalProperties": false, - "properties": { - "PieceCid": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "RawBlockSize": { - "title": "number", - "type": "number" - }, - "Root": { - "title": "Content Identifier", - "type": "string" - }, - "TransferType": { - "type": "string" - } - }, - "type": "object" - }, - "SectorNumber": { - "title": "number", - "type": "number" - }, - "SlashEpoch": { - "title": "number", - "type": "number" - }, - "State": { - "title": "number", - "type": "number" - }, - "TransferChannelId": { - "additionalProperties": false, - "properties": { - "ID": { - "title": "number", - "type": "number" - }, - "Initiator": { - "type": "string" - }, - "Responder": { - "type": "string" - } - }, - "type": "object" - } - }, - 
"type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6275" - } - }, - { - "name": "Filecoin.MarketListRetrievalDeals", - "description": "```go\nfunc (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) {\n\tif s.Internal.MarketListRetrievalDeals == nil {\n\t\treturn *new([]struct{}), ErrNotSupported\n\t}\n\treturn s.Internal.MarketListRetrievalDeals(p0)\n}\n```", - "summary": "MarketListRetrievalDeals is deprecated, returns empty list\n", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]struct{}", - "description": "[]struct{}", - "summary": "", - "schema": { - "examples": [ - [ - {} - ] - ], - "items": [ - { - "additionalProperties": false, - "type": [ - "object" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6286" - } - }, - { - "name": "Filecoin.MarketPendingDeals", - "description": "```go\nfunc (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) {\n\tif s.Internal.MarketPendingDeals == nil {\n\t\treturn *new(PendingDealInfo), ErrNotSupported\n\t}\n\treturn s.Internal.MarketPendingDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "PendingDealInfo", - "description": "PendingDealInfo", - "summary": "", - "schema": { - "examples": [ - { - "Deals": [ - { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 
10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - } - ], - "PublishPeriodStart": "0001-01-01T00:00:00Z", - "PublishPeriod": 60000000000 - } - ], - "additionalProperties": false, - "properties": { - "Deals": { - "items": { - "additionalProperties": false, - "properties": { - "ClientSignature": { - "additionalProperties": false, - "properties": { - "Data": { - "media": { - "binaryEncoding": "base64" - }, - "type": "string" - }, - "Type": { - "title": "number", - "type": "number" - } - }, - "type": "object" - }, - "Proposal": { - "additionalProperties": false, - "properties": { - "Client": { - "additionalProperties": false, - "type": "object" - }, - "ClientCollateral": { - "additionalProperties": false, - "type": "object" - }, - "EndEpoch": { - "title": "number", - "type": "number" - }, - "Label": { - "additionalProperties": false, - "type": "object" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "PieceSize": { - "title": "number", - "type": "number" - }, - "Provider": { - "additionalProperties": false, - "type": "object" - }, - "ProviderCollateral": { - "additionalProperties": false, - "type": "object" - }, - "StartEpoch": { - "title": "number", - "type": "number" - }, - "StoragePricePerEpoch": { - "additionalProperties": false, - "type": "object" - }, - "VerifiedDeal": { - "type": "boolean" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - }, - "PublishPeriod": { - "title": "number", - "type": "number" - }, - "PublishPeriodStart": { - "format": "date-time", - "type": "string" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6297" - } - }, - { - "name": 
"Filecoin.MarketPublishPendingDeals", - "description": "```go\nfunc (s *StorageMinerStruct) MarketPublishPendingDeals(p0 context.Context) error {\n\tif s.Internal.MarketPublishPendingDeals == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.MarketPublishPendingDeals(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6308" - } - }, - { - "name": "Filecoin.MarketRestartDataTransfer", - "description": "```go\nfunc (s *StorageMinerStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {\n\tif s.Internal.MarketRestartDataTransfer == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3)\n}\n```", - "summary": "MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer\n", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "datatransfer.TransferID", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 3 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "peer.ID", - "summary": "", - "schema": { - "examples": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p3", - "description": "bool", - "summary": "", - "schema": { - "examples": [ - true - ], - "type": [ - "boolean" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" 
- ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6319" - } - }, - { - "name": "Filecoin.MarketRetryPublishDeal", - "description": "```go\nfunc (s *StorageMinerStruct) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error {\n\tif s.Internal.MarketRetryPublishDeal == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.MarketRetryPublishDeal(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6330" - } - }, - { - "name": "Filecoin.MarketSetAsk", - "description": "```go\nfunc (s *StorageMinerStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error {\n\tif s.Internal.MarketSetAsk == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "types.BigInt", - "summary": "", - "schema": { - "examples": [ - "0" - ], - "additionalProperties": 
false, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p2", - "description": "types.BigInt", - "summary": "", - "schema": { - "examples": [ - "0" - ], - "additionalProperties": false, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p3", - "description": "abi.ChainEpoch", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 10101 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p4", - "description": "abi.PaddedPieceSize", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 1032 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - }, - { - "name": "p5", - "description": "abi.PaddedPieceSize", - "summary": "", - "schema": { - "title": "number", - "description": "Number is a number", - "examples": [ - 1032 - ], - "type": [ - "number" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6341" - } - }, - { - "name": "Filecoin.MarketSetRetrievalAsk", - "description": "```go\nfunc (s *StorageMinerStruct) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error {\n\tif s.Internal.MarketSetRetrievalAsk == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.MarketSetRetrievalAsk(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - "name": "p1", - "description": "*retrievalmarket.Ask", - "summary": "", - "schema": { - "examples": [ - { - "PricePerByte": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - 
"PaymentIntervalIncrease": 42 - } - ], - "additionalProperties": false, - "properties": { - "PaymentInterval": { - "title": "number", - "type": "number" - }, - "PaymentIntervalIncrease": { - "title": "number", - "type": "number" - }, - "PricePerByte": { - "additionalProperties": false, - "type": "object" - }, - "UnsealPrice": { - "additionalProperties": false, - "type": "object" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "Null", - "description": "Null", - "schema": { - "type": [ - "null" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6352" - } - }, - { - "name": "Filecoin.MiningBase", - "description": "```go\nfunc (s *StorageMinerStruct) MiningBase(p0 context.Context) (*types.TipSet, error) {\n\tif s.Internal.MiningBase == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.MiningBase(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "*types.TipSet", - "description": "*types.TipSet", - "summary": "", - "schema": { - "examples": [ - { - "Cids": null, - "Blocks": null, - "Height": 0 - } - ], - "additionalProperties": false, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6363" - } - }, - { - "name": "Filecoin.PiecesGetCIDInfo", - "description": "```go\nfunc (s *StorageMinerStruct) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) {\n\tif s.Internal.PiecesGetCIDInfo == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.PiecesGetCIDInfo(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - 
"name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*piecestore.CIDInfo", - "description": "*piecestore.CIDInfo", - "summary": "", - "schema": { - "examples": [ - { - "CID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceBlockLocations": [ - { - "RelOffset": 42, - "BlockSize": 42, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - ] - } - ], - "additionalProperties": false, - "properties": { - "CID": { - "title": "Content Identifier", - "type": "string" - }, - "PieceBlockLocations": { - "items": { - "additionalProperties": false, - "properties": { - "BlockSize": { - "title": "number", - "type": "number" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - }, - "RelOffset": { - "title": "number", - "type": "number" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": [ - "object" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6374" - } - }, - { - "name": "Filecoin.PiecesGetPieceInfo", - "description": "```go\nfunc (s *StorageMinerStruct) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) {\n\tif s.Internal.PiecesGetPieceInfo == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.PiecesGetPieceInfo(p0, p1)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [ - { - 
"name": "p1", - "description": "cid.Cid", - "summary": "", - "schema": { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "examples": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ], - "type": [ - "string" - ] - }, - "required": true, - "deprecated": false - } - ], - "result": { - "name": "*piecestore.PieceInfo", - "description": "*piecestore.PieceInfo", - "summary": "", - "schema": { - "examples": [ - { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Deals": [ - { - "DealID": 5432, - "SectorID": 9, - "Offset": 1032, - "Length": 1032 - } - ] - } - ], - "additionalProperties": false, - "properties": { - "Deals": { - "items": { - "additionalProperties": false, - "properties": { - "DealID": { - "title": "number", - "type": "number" - }, - "Length": { - "title": "number", - "type": "number" - }, - "Offset": { - "title": "number", - "type": "number" - }, - "SectorID": { - "title": "number", - "type": "number" - } - }, - "type": "object" - }, - "type": "array" - }, - "PieceCID": { - "title": "Content Identifier", - "type": "string" - } - }, + "additionalProperties": false, "type": [ "object" ] @@ -3700,87 +945,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6385" - } - }, - { - "name": "Filecoin.PiecesListCidInfos", - "description": "```go\nfunc (s *StorageMinerStruct) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) {\n\tif s.Internal.PiecesListCidInfos == nil {\n\t\treturn *new([]cid.Cid), ErrNotSupported\n\t}\n\treturn s.Internal.PiecesListCidInfos(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]cid.Cid", - "description": 
"[]cid.Cid", - "summary": "", - "schema": { - "examples": [ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ] - ], - "items": [ - { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "type": [ - "string" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6396" - } - }, - { - "name": "Filecoin.PiecesListPieces", - "description": "```go\nfunc (s *StorageMinerStruct) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) {\n\tif s.Internal.PiecesListPieces == nil {\n\t\treturn *new([]cid.Cid), ErrNotSupported\n\t}\n\treturn s.Internal.PiecesListPieces(p0)\n}\n```", - "summary": "", - "paramStructure": "by-position", - "params": [], - "result": { - "name": "[]cid.Cid", - "description": "[]cid.Cid", - "summary": "", - "schema": { - "examples": [ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ] - ], - "items": [ - { - "title": "Content Identifier", - "description": "Cid represents a self-describing content addressed identifier. 
It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash.", - "type": [ - "string" - ] - } - ], - "type": [ - "array" - ] - }, - "required": true, - "deprecated": false - }, - "deprecated": false, - "externalDocs": { - "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6407" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5278" } }, { @@ -3821,7 +986,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6418" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5289" } }, { @@ -3889,7 +1054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6429" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5300" } }, { @@ -4020,7 +1185,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6440" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5311" } }, { @@ -4151,7 +1316,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6451" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5322" } }, { @@ -4251,7 +1416,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6462" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5333" } }, { @@ -4351,7 +1516,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6473" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5344" } }, { @@ -4451,7 +1616,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6484" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5355" } }, { @@ -4551,7 +1716,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6495" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5366" } }, { @@ -4651,7 +1816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6506" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5377" } }, { @@ -4751,7 +1916,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6517" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5388" } }, { @@ -4875,7 +2040,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6528" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5399" } }, { @@ -4999,7 +2164,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6539" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5410" } }, { @@ -5114,7 +2279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6550" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5421" } }, { @@ -5214,7 +2379,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6561" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5432" } }, { @@ -5347,7 +2512,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6572" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5443" } }, { @@ -5471,7 +2636,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6583" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5454" } }, { @@ -5595,7 +2760,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6594" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5465" } }, { @@ -5719,7 +2884,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6605" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5476" } }, { @@ -5852,7 +3017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6616" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5487" } }, { @@ -5952,7 +3117,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6627" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5498" } }, { @@ -5970,8 +3135,7 @@ [ "Mining", "Sealing", - "SectorStorage", - "Markets" + "SectorStorage" ] ], "items": [ @@ -5993,7 +3157,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6638" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5509" } }, { @@ -6065,7 +3229,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6649" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5520" } }, { @@ -6115,7 +3279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6660" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5531" } }, { @@ -6159,7 +3323,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6671" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5542" } }, { @@ -6200,7 +3364,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6682" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5553" } }, { @@ -6444,7 +3608,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6693" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5564" } }, { @@ -6518,7 +3682,7 @@ 
"deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6704" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5575" } }, { @@ -6568,7 +3732,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6715" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5586" } }, { @@ -6597,7 +3761,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6726" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5597" } }, { @@ -6626,7 +3790,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6737" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5608" } }, { @@ -6682,7 +3846,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6748" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5619" } }, { @@ -6705,7 +3869,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6759" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5630" } }, { @@ -6765,7 +3929,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6770" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5641" } }, { @@ -6804,7 +3968,7 @@ "deprecated": false, 
"externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6781" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5652" } }, { @@ -6844,7 +4008,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6792" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5663" } }, { @@ -6917,7 +4081,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6803" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5674" } }, { @@ -6981,7 +4145,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6814" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5685" } }, { @@ -7044,7 +4208,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6825" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5696" } }, { @@ -7094,7 +4258,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6836" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5707" } }, { @@ -7653,7 +4817,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6847" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5718" } }, { @@ -7694,7 +4858,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6858" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5729" } }, { @@ -7735,7 +4899,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6869" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5740" } }, { @@ -7776,7 +4940,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6880" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5751" } }, { @@ -7817,7 +4981,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6891" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5762" } }, { @@ -7858,7 +5022,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6902" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5773" } }, { @@ -7889,7 +5053,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6913" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5784" } }, { @@ -7939,7 +5103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6924" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5795" } }, { @@ -7980,7 +5144,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6935" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5806" } }, { @@ -8019,7 +5183,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6946" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5817" } }, { @@ -8083,7 +5247,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6957" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5828" } }, { @@ -8141,7 +5305,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6968" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5839" } }, { @@ -8588,7 +5752,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6979" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5850" } }, { @@ -8624,7 +5788,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6990" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5861" } }, { @@ -8767,7 +5931,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7001" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5872" } }, { @@ -8823,7 +5987,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7012" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5883" } }, { @@ -8862,7 +6026,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7023" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5894" } }, { @@ -9039,7 +6203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7034" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5905" } }, { @@ -9091,7 +6255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7045" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5916" } }, { @@ -9283,7 +6447,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7056" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5927" } }, { @@ -9383,7 +6547,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7067" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5938" } }, { @@ -9437,7 +6601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7078" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5949" } }, { @@ -9476,7 +6640,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7089" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5960" } }, { @@ -9561,7 +6725,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7100" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5971" } }, { @@ -9755,7 +6919,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7111" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5982" } }, { @@ -9853,7 +7017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7122" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5993" } }, { @@ -9985,7 +7149,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7133" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6004" } }, { @@ -10039,7 +7203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7144" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6015" } }, { @@ -10073,7 +7237,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7155" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6026" } }, { @@ -10160,7 +7324,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7166" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6037" } }, { @@ -10214,7 +7378,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7177" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6048" } }, { @@ -10314,7 +7478,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7188" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6059" } }, { @@ -10391,7 +7555,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7199" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6070" } }, { @@ -10482,7 +7646,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7210" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6081" } }, { @@ -10521,7 +7685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7221" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6092" } }, { @@ -10637,7 +7801,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7232" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6103" } }, { @@ -12737,7 +9901,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7243" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6114" } } ] diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz deleted file mode 100644 index 5fbd42c3db8..00000000000 Binary files a/build/openrpc/miner.json.gz and /dev/null differ diff --git a/build/openrpc/worker.json b/build/openrpc/worker.json index 889853b5f00..d962d914484 100644 --- a/build/openrpc/worker.json +++ b/build/openrpc/worker.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.0" + "version": "1.27.1" }, "methods": [ { @@ -161,7 +161,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7331" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6202" } }, { @@ -252,7 +252,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7342" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6213" } }, { @@ -420,7 +420,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7353" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6224" } }, { @@ -447,7 +447,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7364" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6235" } }, { @@ -597,7 +597,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7375" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6246" } }, { @@ -700,7 +700,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7386" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6257" } }, { @@ -803,7 +803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7397" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6268" } }, { @@ -925,7 +925,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7408" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6279" } }, { @@ -1135,7 +1135,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7419" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6290" } }, { @@ -1306,7 +1306,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7430" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6301" } }, { @@ -3350,7 +3350,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7441" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6312" } }, { @@ -3470,7 +3470,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7452" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6323" } }, { @@ -3531,7 +3531,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7463" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6334" } }, { @@ -3569,7 +3569,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7474" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6345" } }, { @@ -3729,7 +3729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7485" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6356" } }, { @@ -3913,7 +3913,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7496" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6367" } }, { @@ -4054,7 +4054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7507" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6378" } }, { @@ -4107,7 +4107,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7518" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6389" } }, { @@ -4250,7 +4250,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7529" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6400" } }, { @@ -4474,7 +4474,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7540" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6411" } }, { @@ -4601,7 +4601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7551" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6422" } }, { @@ -4768,7 +4768,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7562" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6433" } }, { @@ -4895,7 +4895,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7573" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6444" } }, { @@ -4933,7 +4933,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7584" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6455" } }, { @@ -4972,7 +4972,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7595" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6466" } }, { @@ -4995,7 +4995,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7606" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6477" } }, { @@ -5034,7 +5034,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7617" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6488" } }, { @@ -5057,7 +5057,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7628" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6499" } }, { @@ -5096,7 +5096,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7639" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6510" } }, { @@ -5130,7 +5130,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7650" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6521" } }, { @@ -5184,7 +5184,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7661" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6532" } }, { @@ -5223,7 +5223,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7672" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6543" } }, { @@ -5262,7 +5262,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7683" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6554" } }, { @@ -5297,7 +5297,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7694" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6565" } }, { @@ -5477,7 +5477,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7705" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6576" } }, { @@ -5506,7 +5506,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7716" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6587" } }, { @@ -5529,7 +5529,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7727" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6598" } } ] diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz deleted file mode 100644 index 0f3cb11c7a3..00000000000 Binary files a/build/openrpc/worker.json.gz and /dev/null differ diff --git a/build/panic_reporter.go b/build/panic_reporter.go index 617d619eb46..7b40f8bc3a2 100644 --- a/build/panic_reporter.go +++ b/build/panic_reporter.go @@ -28,13 +28,31 @@ var PanicReportingPath = "panic-reports" // the lotus journal to be included in the panic report. var PanicReportJournalTail = defaultJournalTail -// GeneratePanicReport produces a timestamped dump of the application state +// GenerateNodePanicReport produces a timestamped dump of the application state // for inspection and debugging purposes. 
Call this function from any place // where a panic or severe error needs to be examined. `persistPath` is the // path where the reports should be saved. `repoPath` is the path where the // journal should be read from. `label` is an optional string to include // next to the report timestamp. -func GeneratePanicReport(persistPath, repoPath, label string) { +// +// This function should be called for panics originating from the Lotus daemon. +func GenerateNodePanicReport(persistPath, repoPath, label string) { + generatePanicReport(NodeUserVersion(), persistPath, repoPath, label) +} + +// GenerateMinerPanicReport produces a timestamped dump of the application state +// for inspection and debugging purposes. Call this function from any place +// where a panic or severe error needs to be examined. `persistPath` is the +// path where the reports should be saved. `repoPath` is the path where the +// journal should be read from. `label` is an optional string to include +// next to the report timestamp. +// +// This function should be called for panics originating from the Lotus miner. 
+func GenerateMinerPanicReport(persistPath, repoPath, label string) { + generatePanicReport(MinerUserVersion(), persistPath, repoPath, label) +} + +func generatePanicReport(buildVersion BuildVersion, persistPath, repoPath, label string) { // make sure we always dump the latest logs on the way out // especially since we're probably panicking defer panicLog.Sync() //nolint:errcheck @@ -64,21 +82,21 @@ func GeneratePanicReport(persistPath, repoPath, label string) { return } - writeAppVersion(filepath.Join(reportPath, "version")) + writeAppVersion(buildVersion, filepath.Join(reportPath, "version")) writeStackTrace(filepath.Join(reportPath, "stacktrace.dump")) writeProfile("goroutines", filepath.Join(reportPath, "goroutines.pprof.gz")) writeProfile("heap", filepath.Join(reportPath, "heap.pprof.gz")) writeJournalTail(PanicReportJournalTail, repoPath, filepath.Join(reportPath, "journal.ndjson")) } -func writeAppVersion(file string) { +func writeAppVersion(buildVersion BuildVersion, file string) { f, err := os.Create(file) if err != nil { panicLog.Error(err.Error()) } defer f.Close() //nolint:errcheck - versionString := []byte(BuildVersion + BuildTypeString() + CurrentCommit + "\n") + versionString := []byte(string(buildVersion) + BuildTypeString() + CurrentCommit + "\n") if _, err := f.Write(versionString); err != nil { panicLog.Error(err.Error()) } diff --git a/build/params_2k.go b/build/params_2k.go index 03cfd82de0b..7c754611eb4 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -23,7 +23,7 @@ var NetworkBundle = "devnet" var BundleOverrides map[actorstypes.Version]string var ActorDebugging = true -var GenesisNetworkVersion = network.Version21 +var GenesisNetworkVersion = network.Version22 var UpgradeBreezeHeight = abi.ChainEpoch(-1) @@ -67,9 +67,11 @@ var UpgradeThunderHeight = abi.ChainEpoch(-23) var UpgradeWatermelonHeight = abi.ChainEpoch(-24) -var UpgradeDragonHeight = abi.ChainEpoch(20) +var UpgradeDragonHeight = abi.ChainEpoch(-24) -var 
UpgradePhoenixHeight = UpgradeDragonHeight + 120 +var UpgradePhoenixHeight = abi.ChainEpoch(-25) + +var UpgradeAussieHeight = abi.ChainEpoch(200) // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -100 @@ -154,6 +156,7 @@ func init() { UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight) UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight) UpgradeDragonHeight = getUpgradeHeight("LOTUS_DRAGON_HEIGHT", UpgradeDragonHeight) + UpgradeAussieHeight = getUpgradeHeight("LOTUS_AUSSIE_HEIGHT", UpgradeAussieHeight) UpgradePhoenixHeight = getUpgradeHeight("LOTUS_PHOENIX_HEIGHT", UpgradePhoenixHeight) DrandSchedule = map[abi.ChainEpoch]DrandEnum{ diff --git a/build/params_butterfly.go b/build/params_butterfly.go index aa3c8a68f84..fee1a434326 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -56,10 +56,10 @@ const UpgradeHyggeHeight = -21 const UpgradeLightningHeight = -22 const UpgradeThunderHeight = -23 const UpgradeWatermelonHeight = -24 +const UpgradeDragonHeight = -25 +const UpgradePhoenixHeight = -26 -const UpgradeDragonHeight = 5760 - -const UpgradePhoenixHeight = UpgradeDragonHeight + 120 +const UpgradeAussieHeight = 400 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -100 diff --git a/build/params_calibnet.go b/build/params_calibnet.go index 1677027d73d..889f6519635 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -98,6 +98,9 @@ const UpgradePhoenixHeight = UpgradeDragonHeight + 120 // 2024-04-03T11:00:00Z const UpgradeCalibrationDragonFixHeight = 1493854 +// ????? 
+const UpgradeAussieHeight = 999999999999999 + var SupportedProofTypes = []abi.RegisteredSealProof{ abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1, diff --git a/build/params_interop.go b/build/params_interop.go index 9f34854a03a..18e4a464be0 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -54,8 +54,9 @@ var UpgradeHyggeHeight = abi.ChainEpoch(-21) var UpgradeLightningHeight = abi.ChainEpoch(-22) var UpgradeThunderHeight = abi.ChainEpoch(-23) var UpgradeWatermelonHeight = abi.ChainEpoch(-24) +var UpgradeDragonHeight = abi.ChainEpoch(-25) -const UpgradeDragonHeight = 50 +const UpgradeAussieHeight = 50 const UpgradePhoenixHeight = UpgradeDragonHeight + 100 diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 91a9b849759..e79acdca33d 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -100,11 +100,14 @@ const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21 const UpgradeWatermelonHeight = 3469380 // 2024-04-24T14:00:00Z -var UpgradeDragonHeight = abi.ChainEpoch(3855360) +const UpgradeDragonHeight = 3855360 // This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet // 2024-04-11T15:00:00Z -var UpgradePhoenixHeight = UpgradeDragonHeight + 120 +const UpgradePhoenixHeight = UpgradeDragonHeight + 120 + +// ?????? +var UpgradeAussieHeight = abi.ChainEpoch(9999999999) // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -1 @@ -130,10 +133,8 @@ func init() { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_DRAGON") == "1" { - UpgradeDragonHeight = math.MaxInt64 - 1 - delete(DrandSchedule, UpgradePhoenixHeight) - UpgradePhoenixHeight = math.MaxInt64 + if os.Getenv("LOTUS_DISABLE_AUSSIE") == "1" { + UpgradeAussieHeight = math.MaxInt64 - 1 } // NOTE: DO NOT change this unless you REALLY know what you're doing. 
This is not consensus critical, however, diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 798a9f2a01d..25cfdd2ea32 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -30,7 +30,7 @@ const AllowableClockDriftSecs = uint64(1) /* inline-gen template const TestNetworkVersion = network.Version{{.latestNetworkVersion}} /* inline-gen start */ -const TestNetworkVersion = network.Version22 +const TestNetworkVersion = network.Version23 /* inline-gen end */ diff --git a/build/params_testground.go b/build/params_testground.go index 0fdc1027e73..beb296bd92e 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -115,6 +115,7 @@ var ( UpgradeDragonHeight abi.ChainEpoch = -26 UpgradePhoenixHeight abi.ChainEpoch = -27 UpgradeCalibrationDragonFixHeight abi.ChainEpoch = -28 + UpgradeAussieHeight abi.ChainEpoch = -29 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/version.go b/build/version.go index 8fdaaafce98..ed0e0c84051 100644 --- a/build/version.go +++ b/build/version.go @@ -2,6 +2,8 @@ package build import "os" +type BuildVersion string + var CurrentCommit string var BuildType int @@ -36,13 +38,24 @@ func BuildTypeString() string { } } -// BuildVersion is the local build version -const BuildVersion = "1.27.0" +// NodeBuildVersion is the local build version of the Lotus daemon +const NodeBuildVersion string = "1.27.1" + +func NodeUserVersion() BuildVersion { + if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { + return BuildVersion(NodeBuildVersion) + } + + return BuildVersion(NodeBuildVersion + BuildTypeString() + CurrentCommit) +} + +// MinerBuildVersion is the local build version of the Lotus miner +const MinerBuildVersion = "1.27.1" -func UserVersion() string { +func MinerUserVersion() BuildVersion { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { - return BuildVersion + return BuildVersion(MinerBuildVersion) } - return BuildVersion + BuildTypeString() + 
CurrentCommit + return BuildVersion(MinerBuildVersion + BuildTypeString() + CurrentCommit) } diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 0f7ac209359..56a5fbada3b 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/go-address" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -22,7 +22,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var Methods = builtin13.MethodsAccount +var Methods = builtin14.MethodsAccount func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { @@ -50,6 +50,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -123,6 +126,9 @@ func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (S case actorstypes.Version13: return make13(store, addr) + case actorstypes.Version14: + return make14(store, addr) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -153,5 +159,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/account/v14.go b/chain/actors/builtin/account/v14.go new file mode 100644 index 00000000000..5071b1b3ba1 --- /dev/null +++ b/chain/actors/builtin/account/v14.go @@ -0,0 +1,62 @@ +package account + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + actorstypes 
"github.com/filecoin-project/go-state-types/actors" + account14 "github.com/filecoin-project/go-state-types/builtin/v14/account" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store, addr address.Address) (State, error) { + out := state14{store: store} + out.State = account14.State{Address: addr} + return &out, nil +} + +type state14 struct { + account14.State + store adt.Store +} + +func (s *state14) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) ActorKey() string { + return manifest.AccountKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 0c69cfca87f..bed64196607 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -46,6 +46,9 @@ func 
Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -119,13 +122,16 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) { case actorstypes.Version13: return make13(store) + case actorstypes.Version14: + return make14(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } var ( - Address = builtin13.CronActorAddr - Methods = builtin13.MethodsCron + Address = builtin14.CronActorAddr + Methods = builtin14.MethodsCron ) type State interface { @@ -151,5 +157,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/cron/v14.go b/chain/actors/builtin/cron/v14.go new file mode 100644 index 00000000000..6117b003e09 --- /dev/null +++ b/chain/actors/builtin/cron/v14.go @@ -0,0 +1,57 @@ +package cron + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + cron14 "github.com/filecoin-project/go-state-types/builtin/v14/cron" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store) (State, error) { + out := state14{store: store} + out.State = *cron14.ConstructState(cron14.BuiltInEntries()) + return &out, nil +} + +type state14 struct { + cron14.State + store adt.Store +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) ActorKey() string { + return manifest.CronKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return 
actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/datacap/datacap.go b/chain/actors/builtin/datacap/datacap.go index 7f5ee6c0bd3..8e81a688197 100644 --- a/chain/actors/builtin/datacap/datacap.go +++ b/chain/actors/builtin/datacap/datacap.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" @@ -17,8 +17,8 @@ import ( ) var ( - Address = builtin13.DatacapActorAddr - Methods = builtin13.MethodsDatacap + Address = builtin14.DatacapActorAddr + Methods = builtin14.MethodsDatacap ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -44,6 +44,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -68,6 +71,9 @@ func MakeState(store adt.Store, av actorstypes.Version, governor address.Address case actorstypes.Version13: return make13(store, governor, bitwidth) + case actorstypes.Version14: + return make14(store, governor, bitwidth) + default: return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av) } @@ -93,5 +99,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/datacap/v14.go b/chain/actors/builtin/datacap/v14.go new file mode 100644 index 00000000000..4fd43ef7916 --- 
/dev/null +++ b/chain/actors/builtin/datacap/v14.go @@ -0,0 +1,82 @@ +package datacap + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + datacap14 "github.com/filecoin-project/go-state-types/builtin/v14/datacap" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store, governor address.Address, bitwidth uint64) (State, error) { + out := state14{store: store} + s, err := datacap14.ConstructState(store, governor, bitwidth) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state14 struct { + datacap14.State + store adt.Store +} + +func (s *state14) Governor() (address.Address, error) { + return s.State.Governor, nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachClient(s.store, actors.Version14, s.verifiedClients, cb) +} + +func (s *state14) verifiedClients() (adt.Map, error) { + return adt14.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth)) +} + +func (s *state14) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version14, s.verifiedClients, addr) +} + +func (s *state14) ActorKey() string { + return manifest.DatacapKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} 
+ +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/evm/evm.go b/chain/actors/builtin/evm/evm.go index 5bda457cd36..01766189f6a 100644 --- a/chain/actors/builtin/evm/evm.go +++ b/chain/actors/builtin/evm/evm.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/manifest" @@ -15,7 +15,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var Methods = builtin13.MethodsEVM +var Methods = builtin14.MethodsEVM // See https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42 const ( @@ -49,6 +49,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -70,6 +73,9 @@ func MakeState(store adt.Store, av actorstypes.Version, bytecode cid.Cid) (State case actorstypes.Version13: return make13(store, bytecode) + case actorstypes.Version14: + return make14(store, bytecode) + default: return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av) } diff --git a/chain/actors/builtin/evm/v14.go b/chain/actors/builtin/evm/v14.go new file mode 100644 index 00000000000..1b6650602ee --- /dev/null +++ b/chain/actors/builtin/evm/v14.go @@ -0,0 +1,72 @@ +package evm + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + evm14 
"github.com/filecoin-project/go-state-types/builtin/v14/evm" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store, bytecode cid.Cid) (State, error) { + out := state14{store: store} + s, err := evm14.ConstructState(store, bytecode) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state14 struct { + evm14.State + store adt.Store +} + +func (s *state14) Nonce() (uint64, error) { + return s.State.Nonce, nil +} + +func (s *state14) IsAlive() (bool, error) { + return s.State.Tombstone == nil, nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) GetBytecodeCID() (cid.Cid, error) { + return s.State.Bytecode, nil +} + +func (s *state14) GetBytecodeHash() ([32]byte, error) { + return s.State.BytecodeHash, nil +} + +func (s *state14) GetBytecode() ([]byte, error) { + bc, err := s.GetBytecodeCID() + if err != nil { + return nil, err + } + + var byteCode abi.CborBytesTransparent + if err := s.store.Get(s.store.Context(), bc, &byteCode); err != nil { + return nil, err + } + + return byteCode, nil +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index de1c6274e21..d9c8052eaa1 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 
"github.com/filecoin-project/specs-actors/actors/builtin" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin13.InitActorAddr - Methods = builtin13.MethodsInit + Address = builtin14.InitActorAddr + Methods = builtin14.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -55,6 +55,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -128,6 +131,9 @@ func MakeState(store adt.Store, av actorstypes.Version, networkName string) (Sta case actorstypes.Version13: return make13(store, networkName) + case actorstypes.Version14: + return make14(store, networkName) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -181,5 +187,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/init/v14.go b/chain/actors/builtin/init/v14.go new file mode 100644 index 00000000000..1319c12f9ff --- /dev/null +++ b/chain/actors/builtin/init/v14.go @@ -0,0 +1,147 @@ +package init + +import ( + "crypto/sha256" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin14 "github.com/filecoin-project/go-state-types/builtin" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := 
state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store, networkName string) (State, error) { + out := state14{store: store} + + s, err := init14.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state14 struct { + init14.State + store adt.Store +} + +func (s *state14) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state14) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state14) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt14.AsMap(s.store, s.State.AddressMap, builtin14.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state14) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state14) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state14) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state14) Remove(addrs ...address.Address) (err error) { + m, err := adt14.AsMap(s.store, s.State.AddressMap, builtin14.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} 
+ +func (s *state14) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) AddressMap() (adt.Map, error) { + return adt14.AsMap(s.store, s.State.AddressMap, builtin14.DefaultHamtBitwidth) +} + +func (s *state14) AddressMapBitWidth() int { + return builtin14.DefaultHamtBitwidth +} + +func (s *state14) AddressMapHashFunction() func(input []byte) []byte { + return func(input []byte) []byte { + res := sha256.Sum256(input) + return res[:] + } +} + +func (s *state14) ActorKey() string { + return manifest.InitKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template index 0604737b357..f913cb0abaa 100644 --- a/chain/actors/builtin/market/actor.go.template +++ b/chain/actors/builtin/market/actor.go.template @@ -143,6 +143,7 @@ type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel type DealState interface { + SectorNumber() abi.SectorNumber // 0 if not yet included in proven sector (0 is also a valid sector number) SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated SlashEpoch() abi.ChainEpoch // -1 if deal never slashed @@ -185,6 +186,10 @@ type ProposalIDState struct { type emptyDealState struct{} +func (e *emptyDealState) SectorNumber() abi.SectorNumber { + return 0 +} + func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { return -1 } diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 
13c09f91bcf..c32bf36676a 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -61,6 +61,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -134,6 +137,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) { case actorstypes.Version13: return make13(store) + case actorstypes.Version14: + return make14(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -235,6 +241,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora case actorstypes.Version13: return decodePublishStorageDealsReturn13(b) + case actorstypes.Version14: + return decodePublishStorageDealsReturn14(b) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -243,6 +252,7 @@ type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel type DealState interface { + SectorNumber() abi.SectorNumber // 0 if not yet included in proven sector (0 is also a valid sector number) SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated SlashEpoch() abi.ChainEpoch // -1 if deal never slashed @@ -284,6 +294,10 @@ type ProposalIDState struct { type emptyDealState struct{} +func (e *emptyDealState) SectorNumber() abi.SectorNumber { + return 0 +} + func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { return -1 } @@ -356,5 +370,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template index 4670576602d..19f2c17aba1 100644 --- a/chain/actors/builtin/market/state.go.template +++ b/chain/actors/builtin/market/state.go.template @@ -212,6 +212,14 @@ type 
dealStateV{{.v}} struct { ds{{.v}} market{{.v}}.DealState } +func (d dealStateV{{.v}}) SectorNumber() abi.SectorNumber { +{{if (le .v 12)}} + return 0 +{{else}} + return d.ds{{.v}}.SectorNumber +{{end}} +} + func (d dealStateV{{.v}}) SectorStartEpoch() abi.ChainEpoch { return d.ds{{.v}}.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go index d797d53f8c6..cd207ac2c03 100644 --- a/chain/actors/builtin/market/v0.go +++ b/chain/actors/builtin/market/v0.go @@ -191,6 +191,12 @@ type dealStateV0 struct { ds0 market0.DealState } +func (d dealStateV0) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV0) SectorStartEpoch() abi.ChainEpoch { return d.ds0.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v10.go b/chain/actors/builtin/market/v10.go index 290c17d092f..9d6e9798966 100644 --- a/chain/actors/builtin/market/v10.go +++ b/chain/actors/builtin/market/v10.go @@ -190,6 +190,12 @@ type dealStateV10 struct { ds10 market10.DealState } +func (d dealStateV10) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV10) SectorStartEpoch() abi.ChainEpoch { return d.ds10.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v11.go b/chain/actors/builtin/market/v11.go index 56a4c6038de..69620f558f7 100644 --- a/chain/actors/builtin/market/v11.go +++ b/chain/actors/builtin/market/v11.go @@ -190,6 +190,12 @@ type dealStateV11 struct { ds11 market11.DealState } +func (d dealStateV11) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV11) SectorStartEpoch() abi.ChainEpoch { return d.ds11.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v12.go b/chain/actors/builtin/market/v12.go index cf7687203f9..7ae77760516 100644 --- a/chain/actors/builtin/market/v12.go +++ b/chain/actors/builtin/market/v12.go @@ -190,6 +190,12 @@ type dealStateV12 struct { ds12 market12.DealState } +func (d dealStateV12) SectorNumber() abi.SectorNumber { + + return 0 + +} + func 
(d dealStateV12) SectorStartEpoch() abi.ChainEpoch { return d.ds12.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v13.go b/chain/actors/builtin/market/v13.go index d270319ce6f..6a5f5877819 100644 --- a/chain/actors/builtin/market/v13.go +++ b/chain/actors/builtin/market/v13.go @@ -190,6 +190,12 @@ type dealStateV13 struct { ds13 market13.DealState } +func (d dealStateV13) SectorNumber() abi.SectorNumber { + + return d.ds13.SectorNumber + +} + func (d dealStateV13) SectorStartEpoch() abi.ChainEpoch { return d.ds13.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v14.go b/chain/actors/builtin/market/v14.go new file mode 100644 index 00000000000..0eea2375f9e --- /dev/null +++ b/chain/actors/builtin/market/v14.go @@ -0,0 +1,410 @@ +package market + +import ( + "bytes" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/builtin" + market14 "github.com/filecoin-project/go-state-types/builtin/v14/market" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func 
make14(store adt.Store) (State, error) { + out := state14{store: store} + + s, err := market14.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state14 struct { + market14.State + store adt.Store +} + +func (s *state14) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state14) BalancesChanged(otherState State) (bool, error) { + otherState14, ok := otherState.(*state14) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState14.State.EscrowTable) || !s.State.LockedTable.Equals(otherState14.State.LockedTable), nil +} + +func (s *state14) StatesChanged(otherState State) (bool, error) { + otherState14, ok := otherState.(*state14) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of deal states has changed + return true, nil + } + return !s.State.States.Equals(otherState14.State.States), nil +} + +func (s *state14) States() (DealStates, error) { + stateArray, err := adt14.AsArray(s.store, s.State.States, market14.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates14{stateArray}, nil +} + +func (s *state14) ProposalsChanged(otherState State) (bool, error) { + otherState14, ok := otherState.(*state14) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of proposals has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState14.State.Proposals), nil +} + +func (s *state14) Proposals() (DealProposals, error) { + proposalArray, err := adt14.AsArray(s.store, s.State.Proposals, market14.ProposalsAmtBitwidth) + if err != nil 
{ + return nil, err + } + return &dealProposals14{proposalArray}, nil +} + +func (s *state14) EscrowTable() (BalanceTable, error) { + bt, err := adt14.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable14{bt}, nil +} + +func (s *state14) LockedTable() (BalanceTable, error) { + bt, err := adt14.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable14{bt}, nil +} + +func (s *state14) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market14.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state14) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable14 struct { + *adt14.BalanceTable +} + +func (bt *balanceTable14) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt14.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates14 struct { + adt.Array +} + +func (s *dealStates14) Get(dealID abi.DealID) (DealState, bool, error) { + var deal14 market14.DealState + found, err := s.Array.Get(uint64(dealID), &deal14) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV14DealState(deal14) + return deal, true, nil +} + +func (s *dealStates14) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds14 market14.DealState + return s.Array.ForEach(&ds14, func(idx int64) error { + return cb(abi.DealID(idx), fromV14DealState(ds14)) + }) +} + +func (s *dealStates14) decode(val *cbg.Deferred) (DealState, error) { + var ds14 market14.DealState + if err := 
ds14.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV14DealState(ds14) + return ds, nil +} + +func (s *dealStates14) array() adt.Array { + return s.Array +} + +type dealStateV14 struct { + ds14 market14.DealState +} + +func (d dealStateV14) SectorNumber() abi.SectorNumber { + + return d.ds14.SectorNumber + +} + +func (d dealStateV14) SectorStartEpoch() abi.ChainEpoch { + return d.ds14.SectorStartEpoch +} + +func (d dealStateV14) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds14.LastUpdatedEpoch +} + +func (d dealStateV14) SlashEpoch() abi.ChainEpoch { + return d.ds14.SlashEpoch +} + +func (d dealStateV14) Equals(other DealState) bool { + if ov14, ok := other.(dealStateV14); ok { + return d.ds14 == ov14.ds14 + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV14)(nil) + +func fromV14DealState(v14 market14.DealState) DealState { + return dealStateV14{v14} +} + +type dealProposals14 struct { + adt.Array +} + +func (s *dealProposals14) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal14 market14.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal14) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV14DealProposal(proposal14) + if err != nil { + return nil, true, xerrors.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals14) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp14 market14.DealProposal + return s.Array.ForEach(&dp14, func(idx int64) error { + dp, err := fromV14DealProposal(dp14) + if err != nil { + return xerrors.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals14) 
decode(val *cbg.Deferred) (*DealProposal, error) { + var dp14 market14.DealProposal + if err := dp14.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV14DealProposal(dp14) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals14) array() adt.Array { + return s.Array +} + +func fromV14DealProposal(v14 market14.DealProposal) (DealProposal, error) { + + label, err := fromV14Label(v14.Label) + + if err != nil { + return DealProposal{}, xerrors.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v14.PieceCID, + PieceSize: v14.PieceSize, + VerifiedDeal: v14.VerifiedDeal, + Client: v14.Client, + Provider: v14.Provider, + + Label: label, + + StartEpoch: v14.StartEpoch, + EndEpoch: v14.EndEpoch, + StoragePricePerEpoch: v14.StoragePricePerEpoch, + + ProviderCollateral: v14.ProviderCollateral, + ClientCollateral: v14.ClientCollateral, + }, nil +} + +func fromV14Label(v14 market14.DealLabel) (DealLabel, error) { + if v14.IsString() { + str, err := v14.ToString() + if err != nil { + return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert string label to string: %w", err) + } + return markettypes.NewLabelFromString(str) + } + + bs, err := v14.ToBytes() + if err != nil { + return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert bytes label to bytes: %w", err) + } + return markettypes.NewLabelFromBytes(bs) +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn14)(nil) + +func decodePublishStorageDealsReturn14(b []byte) (PublishStorageDealsReturn, error) { + var retval market14.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn14{retval}, nil +} + +type publishStorageDealsReturn14 struct { + 
market14.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn14) IsDealValid(index uint64) (bool, int, error) { + + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + +} + +func (r *publishStorageDealsReturn14) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state14) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + allocations, err := adt14.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth) + if err != nil { + return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + + var allocationId cbg.CborInt + found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId) + if err != nil { + return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + if !found { + return verifregtypes.NoAllocationID, nil + } + + return verifregtypes.AllocationId(allocationId), nil + +} + +func (s *state14) ActorKey() string { + return manifest.MarketKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go index 5ced3c8a337..3c1f376ecdc 100644 --- 
a/chain/actors/builtin/market/v2.go +++ b/chain/actors/builtin/market/v2.go @@ -191,6 +191,12 @@ type dealStateV2 struct { ds2 market2.DealState } +func (d dealStateV2) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV2) SectorStartEpoch() abi.ChainEpoch { return d.ds2.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go index 35dd9c29a55..7e8d60fea19 100644 --- a/chain/actors/builtin/market/v3.go +++ b/chain/actors/builtin/market/v3.go @@ -186,6 +186,12 @@ type dealStateV3 struct { ds3 market3.DealState } +func (d dealStateV3) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV3) SectorStartEpoch() abi.ChainEpoch { return d.ds3.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go index bc9e61c8888..08beaf51588 100644 --- a/chain/actors/builtin/market/v4.go +++ b/chain/actors/builtin/market/v4.go @@ -186,6 +186,12 @@ type dealStateV4 struct { ds4 market4.DealState } +func (d dealStateV4) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV4) SectorStartEpoch() abi.ChainEpoch { return d.ds4.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go index 63743ba8d10..93a05355de0 100644 --- a/chain/actors/builtin/market/v5.go +++ b/chain/actors/builtin/market/v5.go @@ -186,6 +186,12 @@ type dealStateV5 struct { ds5 market5.DealState } +func (d dealStateV5) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV5) SectorStartEpoch() abi.ChainEpoch { return d.ds5.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v6.go b/chain/actors/builtin/market/v6.go index 5900eace953..ec91573184e 100644 --- a/chain/actors/builtin/market/v6.go +++ b/chain/actors/builtin/market/v6.go @@ -188,6 +188,12 @@ type dealStateV6 struct { ds6 market6.DealState } +func (d dealStateV6) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV6) 
SectorStartEpoch() abi.ChainEpoch { return d.ds6.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go index f51f070c7f2..5e49fb5dad3 100644 --- a/chain/actors/builtin/market/v7.go +++ b/chain/actors/builtin/market/v7.go @@ -188,6 +188,12 @@ type dealStateV7 struct { ds7 market7.DealState } +func (d dealStateV7) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV7) SectorStartEpoch() abi.ChainEpoch { return d.ds7.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v8.go b/chain/actors/builtin/market/v8.go index f9bf25f9c7f..0ea4bf84b0f 100644 --- a/chain/actors/builtin/market/v8.go +++ b/chain/actors/builtin/market/v8.go @@ -189,6 +189,12 @@ type dealStateV8 struct { ds8 market8.DealState } +func (d dealStateV8) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV8) SectorStartEpoch() abi.ChainEpoch { return d.ds8.SectorStartEpoch } diff --git a/chain/actors/builtin/market/v9.go b/chain/actors/builtin/market/v9.go index 3b5be4dfa26..7805efca23d 100644 --- a/chain/actors/builtin/market/v9.go +++ b/chain/actors/builtin/market/v9.go @@ -190,6 +190,12 @@ type dealStateV9 struct { ds9 market9.DealState } +func (d dealStateV9) SectorNumber() abi.SectorNumber { + + return 0 + +} + func (d dealStateV9) SectorStartEpoch() abi.ChainEpoch { return d.ds9.SectorStartEpoch } diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index cdf0046f587..e3e0f617996 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -55,6 +55,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -128,6 +131,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version13: return make13(store) + case actors.Version14: + return make14(store) + } return nil, 
xerrors.Errorf("unknown actor version %d", av) } @@ -391,5 +397,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/miner/v14.go b/chain/actors/builtin/miner/v14.go new file mode 100644 index 00000000000..3153bead3fd --- /dev/null +++ b/chain/actors/builtin/miner/v14.go @@ -0,0 +1,595 @@ +package miner + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin14 "github.com/filecoin-project/go-state-types/builtin" + miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store) (State, error) { + out := state14{store: store} + out.State = miner14.State{} + return &out, nil +} + +type state14 struct { + miner14.State + store adt.Store +} + +type deadline14 struct { + miner14.Deadline + store adt.Store +} + +type partition14 struct { + miner14.Partition + store adt.Store +} + +func (s *state14) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = 
abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesn't have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state14) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state14) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state14) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state14) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state14) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +// Returns nil, nil if sector is not found +func (s *state14) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV14SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state14) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state14) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner14.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. 
+func (s *state14) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner14.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner14.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner14.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner14.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner14.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state14) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := 
s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV14SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state14) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt14.AsMap(s.store, s.State.PreCommittedSectors, builtin14.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner14.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV14SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state14) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner14.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info14 miner14.SectorOnChainInfo + if err := sectors.ForEach(&info14, func(_ int64) error { + info := fromV14SectorOnChainInfo(info14) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos14, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos14)) + for i, info14 := range infos14 { + info := fromV14SectorOnChainInfo(*info14) + infos[i] = &info + } + return infos, nil +} + +func (s *state14) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state14) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state14) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state14) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state14) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state14) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline14{*dl, s.store}, nil +} + +func (s *state14) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner14.Deadline) error { + return cb(i, &deadline14{*dl, s.store}) + }) +} + +func (s *state14) NumDeadlines() (uint64, error) { + return miner14.WPoStPeriodDeadlines, nil +} + +func (s *state14) DeadlinesChanged(other State) (bool, error) { + other14, ok := other.(*state14) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other14.Deadlines), nil +} + +func (s *state14) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state14) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state14) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + + Beneficiary: info.Beneficiary, + BeneficiaryTerm: BeneficiaryTerm(info.BeneficiaryTerm), + PendingBeneficiaryTerm: (*PendingBeneficiaryChange)(info.PendingBeneficiaryTerm), + } + + return mi, nil +} + +func (s *state14) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state14) DeadlineCronActive() 
(bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state14) sectors() (adt.Array, error) { + return adt14.AsArray(s.store, s.Sectors, miner14.SectorsAmtBitwidth) +} + +func (s *state14) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner14.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV14SectorOnChainInfo(si), nil +} + +func (s *state14) precommits() (adt.Map, error) { + return adt14.AsMap(s.store, s.PreCommittedSectors, builtin14.DefaultHamtBitwidth) +} + +func (s *state14) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner14.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV14SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state14) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner14.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner14.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline14) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition14{*p, d.store}, nil +} + +func (d *deadline14) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + 
return err + } + var part miner14.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition14{part, d.store}) + }) +} + +func (d *deadline14) PartitionsChanged(other Deadline) (bool, error) { + other14, ok := other.(*deadline14) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other14.Deadline.Partitions), nil +} + +func (d *deadline14) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline14) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition14) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition14) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition14) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition14) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV14SectorOnChainInfo(v14 miner14.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v14.SectorNumber, + SealProof: v14.SealProof, + SealedCID: v14.SealedCID, + DealIDs: v14.DealIDs, + Activation: v14.Activation, + Expiration: v14.Expiration, + DealWeight: v14.DealWeight, + VerifiedDealWeight: v14.VerifiedDealWeight, + InitialPledge: v14.InitialPledge, + ExpectedDayReward: v14.ExpectedDayReward, + ExpectedStoragePledge: v14.ExpectedStoragePledge, + + SectorKeyCID: v14.SectorKeyCID, + + PowerBaseEpoch: v14.PowerBaseEpoch, + ReplacedDayReward: v14.ReplacedDayReward, + Flags: SectorOnChainInfoFlags(v14.Flags), + } + return info +} + +func fromV14SectorPreCommitOnChainInfo(v14 miner14.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + ret := SectorPreCommitOnChainInfo{ + Info: 
SectorPreCommitInfo{ + SealProof: v14.Info.SealProof, + SectorNumber: v14.Info.SectorNumber, + SealedCID: v14.Info.SealedCID, + SealRandEpoch: v14.Info.SealRandEpoch, + DealIDs: v14.Info.DealIDs, + Expiration: v14.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v14.PreCommitDeposit, + PreCommitEpoch: v14.PreCommitEpoch, + } + + ret.Info.UnsealedCid = v14.Info.UnsealedCid + + return ret +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) ActorKey() string { + return manifest.MinerKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/multisig/message10.go b/chain/actors/builtin/multisig/message10.go index 59dd4dde016..4ea8c705d69 100644 --- a/chain/actors/builtin/multisig/message10.go +++ b/chain/actors/builtin/multisig/message10.go @@ -8,7 +8,7 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig" - init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message10) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init13.ExecParams{ + execParams := &init14.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message11.go b/chain/actors/builtin/multisig/message11.go index 89bee0255af..2a8309a1aeb 100644 --- 
a/chain/actors/builtin/multisig/message11.go +++ b/chain/actors/builtin/multisig/message11.go @@ -8,7 +8,7 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig" - init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message11) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init13.ExecParams{ + execParams := &init14.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message12.go b/chain/actors/builtin/multisig/message12.go index 326026c93ff..8599a1f83a7 100644 --- a/chain/actors/builtin/multisig/message12.go +++ b/chain/actors/builtin/multisig/message12.go @@ -8,7 +8,7 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig" - init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message12) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init13.ExecParams{ + execParams := &init14.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message13.go b/chain/actors/builtin/multisig/message13.go index 94a9cbfbf7e..0484ba0eb4b 100644 --- a/chain/actors/builtin/multisig/message13.go +++ 
b/chain/actors/builtin/multisig/message13.go @@ -7,8 +7,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" multisig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message13) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init13.ExecParams{ + execParams := &init14.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message14.go b/chain/actors/builtin/multisig/message14.go new file mode 100644 index 00000000000..fc56c6060a0 --- /dev/null +++ b/chain/actors/builtin/multisig/message14.go @@ -0,0 +1,77 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtintypes "github.com/filecoin-project/go-state-types/builtin" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" + multisig14 "github.com/filecoin-project/go-state-types/builtin/v14/multisig" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message14 struct{ message0 } + +func (m message14) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < 
threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig14.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + code, ok := actors.GetActorCodeID(actorstypes.Version14, manifest.MultisigKey) + if !ok { + return nil, xerrors.Errorf("failed to get multisig code ID") + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init14.ExecParams{ + CodeCID: code, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/message8.go b/chain/actors/builtin/multisig/message8.go index 5d79fe6c5a7..49cd24c3db7 100644 --- a/chain/actors/builtin/multisig/message8.go +++ b/chain/actors/builtin/multisig/message8.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" multisig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig" "github.com/filecoin-project/go-state-types/manifest" @@ -57,7 +57,7 @@ func (m message8) Create( } // new actors are created by invoking 'exec' on the init actor with 
the constructor params - execParams := &init13.ExecParams{ + execParams := &init14.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message9.go b/chain/actors/builtin/multisig/message9.go index 9003b7e38d8..66d6990e4e5 100644 --- a/chain/actors/builtin/multisig/message9.go +++ b/chain/actors/builtin/multisig/message9.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig" "github.com/filecoin-project/go-state-types/manifest" @@ -57,7 +57,7 @@ func (m message9) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init13.ExecParams{ + execParams := &init14.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index 08da9bd2dca..e35a183a332 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - msig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + msig14 "github.com/filecoin-project/go-state-types/builtin/v14/multisig" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -54,6 +54,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case 
actorstypes.Version14: + return load14(store, act.Head) + } } @@ -127,6 +130,9 @@ func MakeState(store adt.Store, av actorstypes.Version, signers []address.Addres case actorstypes.Version13: return make13(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + case actorstypes.Version14: + return make14(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -153,7 +159,7 @@ type State interface { GetState() interface{} } -type Transaction = msig13.Transaction +type Transaction = msig14.Transaction var Methods = builtintypes.MethodsMultisig @@ -198,6 +204,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder { case actorstypes.Version13: return message13{message0{from}} + + case actorstypes.Version14: + return message14{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -221,13 +230,13 @@ type MessageBuilder interface { } // this type is the same between v0 and v2 -type ProposalHashData = msig13.ProposalHashData -type ProposeReturn = msig13.ProposeReturn -type ProposeParams = msig13.ProposeParams -type ApproveReturn = msig13.ApproveReturn +type ProposalHashData = msig14.ProposalHashData +type ProposeReturn = msig14.ProposeReturn +type ProposeParams = msig14.ProposeParams +type ApproveReturn = msig14.ApproveReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := msig13.TxnIDParams{ID: msig13.TxnID(id)} + params := msig14.TxnIDParams{ID: msig14.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) @@ -264,5 +273,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/multisig/v14.go b/chain/actors/builtin/multisig/v14.go new file mode 100644 index 
00000000000..1648fcbbfe1 --- /dev/null +++ b/chain/actors/builtin/multisig/v14.go @@ -0,0 +1,138 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin14 "github.com/filecoin-project/go-state-types/builtin" + msig14 "github.com/filecoin-project/go-state-types/builtin/v14/multisig" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state14{store: store} + out.State = msig14.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt14.StoreEmptyMap(store, builtin14.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state14 struct { + msig14.State + store adt.Store +} + +func (s *state14) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state14) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state14) 
UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state14) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state14) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state14) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state14) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt14.AsMap(s.store, s.State.PendingTxns, builtin14.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig14.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state14) PendingTxnChanged(other State) (bool, error) { + other14, ok := other.(*state14) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other14.PendingTxns), nil +} + +func (s *state14) transactions() (adt.Map, error) { + return adt14.AsMap(s.store, s.PendingTxns, builtin14.DefaultHamtBitwidth) +} + +func (s *state14) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig14.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) ActorKey() string { + return manifest.MultisigKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git 
a/chain/actors/builtin/paych/message14.go b/chain/actors/builtin/paych/message14.go new file mode 100644 index 00000000000..a33c66dd532 --- /dev/null +++ b/chain/actors/builtin/paych/message14.go @@ -0,0 +1,109 @@ +package paych + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin14 "github.com/filecoin-project/go-state-types/builtin" + init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" + paych14 "github.com/filecoin-project/go-state-types/builtin/v14/paych" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message14 struct{ from address.Address } + +func (m message14) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version14, "paymentchannel") + if !ok { + return nil, xerrors.Errorf("error getting actor paymentchannel code id for actor version %d", 14) + } + + params, aerr := actors.SerializeParams(&paych14.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init14.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin14.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message14) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych14.UpdateChannelStateParams{ + + Sv: toV14SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return 
&types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin14.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func toV14SignedVoucher(sv paychtypes.SignedVoucher) paych14.SignedVoucher { + merges := make([]paych14.Merge, len(sv.Merges)) + for i := range sv.Merges { + merges[i] = paych14.Merge{ + Lane: sv.Merges[i].Lane, + Nonce: sv.Merges[i].Nonce, + } + } + + return paych14.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretHash: sv.SecretHash, + Extra: (*paych14.ModVerifyParams)(sv.Extra), + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: merges, + Signature: sv.Signature, + } +} + +func (m message14) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin14.MethodsPaych.Settle, + }, nil +} + +func (m message14) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin14.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index 2b5c78edfee..1f79c6d08ad 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -56,6 +56,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -179,6 +182,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder { case actorstypes.Version13: return message13{from} + case actorstypes.Version14: + return message14{from} + default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -222,5 +228,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), 
(&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/paych/v14.go b/chain/actors/builtin/paych/v14.go new file mode 100644 index 00000000000..b0ad7d7340f --- /dev/null +++ b/chain/actors/builtin/paych/v14.go @@ -0,0 +1,135 @@ +package paych + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + paych14 "github.com/filecoin-project/go-state-types/builtin/v14/paych" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store) (State, error) { + out := state14{store: store} + out.State = paych14.State{} + return &out, nil +} + +type state14 struct { + paych14.State + store adt.Store + lsAmt *adt14.Array +} + +// Channel owner, who has funded the actor +func (s *state14) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state14) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state14) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state14) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state14) getOrLoadLsAmt() (*adt14.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane 
state from the chain + lsamt, err := adt14.AsArray(s.store, s.State.LaneStates, paych14.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state14) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state14) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. + var ls paych14.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState14{ls}) + }) +} + +type laneState14 struct { + paych14.LaneState +} + +func (ls *laneState14) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState14) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} + +func (s *state14) ActorKey() string { + return manifest.PaychKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index e263e3f8788..b08f6589909 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" 
"github.com/filecoin-project/go-state-types/big" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -27,8 +27,8 @@ import ( ) var ( - Address = builtin13.StoragePowerActorAddr - Methods = builtin13.MethodsPower + Address = builtin14.StoragePowerActorAddr + Methods = builtin14.MethodsPower ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -57,6 +57,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -130,6 +133,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) { case actorstypes.Version13: return make13(store) + case actorstypes.Version14: + return make14(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -197,5 +203,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/power/v14.go b/chain/actors/builtin/power/v14.go new file mode 100644 index 00000000000..8c0249894f9 --- /dev/null +++ b/chain/actors/builtin/power/v14.go @@ -0,0 +1,207 @@ +package power + +import ( + "bytes" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin14 "github.com/filecoin-project/go-state-types/builtin" + power14 "github.com/filecoin-project/go-state-types/builtin/v14/power" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + 
"github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store) (State, error) { + out := state14{store: store} + + s, err := power14.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state14 struct { + power14.State + store adt.Store +} + +func (s *state14) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state14) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state14) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state14) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power14.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state14) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state14) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state14) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state14) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state14) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power14.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state14) ClaimsChanged(other State) (bool, error) { + other14, ok := other.(*state14) + if !ok { + // treat an upgrade as a 
change, always + return true, nil + } + return !s.State.Claims.Equals(other14.State.Claims), nil +} + +func (s *state14) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state14) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state14) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state14) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) claims() (adt.Map, error) { + return adt14.AsMap(s.store, s.Claims, builtin14.DefaultHamtBitwidth) +} + +func (s *state14) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power14.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV14Claim(ci), nil +} + +func fromV14Claim(v14 power14.Claim) Claim { + return Claim{ + RawBytePower: v14.RawBytePower, + QualityAdjPower: v14.QualityAdjPower, + } +} + +func (s *state14) ActorKey() string { + return manifest.PowerKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/registry.go b/chain/actors/builtin/registry.go index 93768580b08..bbded891955 100644 --- a/chain/actors/builtin/registry.go +++ b/chain/actors/builtin/registry.go @@ -74,6 +74,22 @@ import ( reward13 "github.com/filecoin-project/go-state-types/builtin/v13/reward" system13 "github.com/filecoin-project/go-state-types/builtin/v13/system" verifreg13 
"github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + account14 "github.com/filecoin-project/go-state-types/builtin/v14/account" + cron14 "github.com/filecoin-project/go-state-types/builtin/v14/cron" + datacap14 "github.com/filecoin-project/go-state-types/builtin/v14/datacap" + eam14 "github.com/filecoin-project/go-state-types/builtin/v14/eam" + ethaccount14 "github.com/filecoin-project/go-state-types/builtin/v14/ethaccount" + evm14 "github.com/filecoin-project/go-state-types/builtin/v14/evm" + _init14 "github.com/filecoin-project/go-state-types/builtin/v14/init" + market14 "github.com/filecoin-project/go-state-types/builtin/v14/market" + miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner" + multisig14 "github.com/filecoin-project/go-state-types/builtin/v14/multisig" + paych14 "github.com/filecoin-project/go-state-types/builtin/v14/paych" + placeholder14 "github.com/filecoin-project/go-state-types/builtin/v14/placeholder" + power14 "github.com/filecoin-project/go-state-types/builtin/v14/power" + reward14 "github.com/filecoin-project/go-state-types/builtin/v14/reward" + system14 "github.com/filecoin-project/go-state-types/builtin/v14/system" + verifreg14 "github.com/filecoin-project/go-state-types/builtin/v14/verifreg" account8 "github.com/filecoin-project/go-state-types/builtin/v8/account" cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron" _init8 "github.com/filecoin-project/go-state-types/builtin/v8/init" @@ -737,6 +753,110 @@ func MakeRegistry(av actorstypes.Version) []RegistryEntry { } } + case actorstypes.Version14: + for key, codeID := range codeIDs { + switch key { + case manifest.AccountKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: account14.Methods, + state: new(account14.State), + }) + case manifest.CronKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: cron14.Methods, + state: new(cron14.State), + }) + case manifest.InitKey: + registry = 
append(registry, RegistryEntry{ + code: codeID, + methods: _init14.Methods, + state: new(_init14.State), + }) + case manifest.MarketKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: market14.Methods, + state: new(market14.State), + }) + case manifest.MinerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: miner14.Methods, + state: new(miner14.State), + }) + case manifest.MultisigKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: multisig14.Methods, + state: new(multisig14.State), + }) + case manifest.PaychKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: paych14.Methods, + state: new(paych14.State), + }) + case manifest.PowerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: power14.Methods, + state: new(power14.State), + }) + case manifest.RewardKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: reward14.Methods, + state: new(reward14.State), + }) + case manifest.SystemKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: system14.Methods, + state: new(system14.State), + }) + case manifest.VerifregKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: verifreg14.Methods, + state: new(verifreg14.State), + }) + case manifest.DatacapKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: datacap14.Methods, + state: new(datacap14.State), + }) + + case manifest.EvmKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: evm14.Methods, + state: new(evm14.State), + }) + case manifest.EamKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: eam14.Methods, + state: nil, + }) + case manifest.PlaceholderKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: placeholder14.Methods, + state: nil, + }) + case manifest.EthAccountKey: + registry = append(registry, RegistryEntry{ + code: codeID, + 
methods: ethaccount14.Methods, + state: nil, + }) + + } + } + default: panic("expected version v8 and up only, use specs-actors for v0-7") } diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index a6c8bff5b55..18860769ca1 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin13.RewardActorAddr - Methods = builtin13.MethodsReward + Address = builtin14.RewardActorAddr + Methods = builtin14.MethodsReward ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -55,6 +55,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -128,6 +131,9 @@ func MakeState(store adt.Store, av actorstypes.Version, currRealizedPower abi.St case actorstypes.Version13: return make13(store, currRealizedPower) + case actorstypes.Version14: + return make14(store, currRealizedPower) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -173,5 +179,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/reward/v14.go b/chain/actors/builtin/reward/v14.go new file mode 100644 index 00000000000..89fa295a352 --- /dev/null +++ b/chain/actors/builtin/reward/v14.go @@ -0,0 +1,120 @@ +package reward + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner" + reward14 "github.com/filecoin-project/go-state-types/builtin/v14/reward" + smoothing14 "github.com/filecoin-project/go-state-types/builtin/v14/util/smoothing" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state14{store: store} + out.State = *reward14.ConstructState(currRealizedPower) + return &out, nil +} + +type state14 struct { + reward14.State + store adt.Store +} + +func (s *state14) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state14) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state14) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state14) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state14) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state14) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state14) CumsumBaseline() (reward14.Spacetime, error) { 
+ return s.State.CumsumBaseline, nil +} + +func (s *state14) CumsumRealized() (reward14.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state14) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner14.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing14.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state14) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner14.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing14.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) ActorKey() string { + return manifest.RewardKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go index 1526a1fc0a5..8f72f717244 100644 --- a/chain/actors/builtin/system/system.go +++ b/chain/actors/builtin/system/system.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" 
"github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -21,7 +21,7 @@ import ( ) var ( - Address = builtin13.SystemActorAddr + Address = builtin14.SystemActorAddr ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -50,6 +50,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -123,6 +126,9 @@ func MakeState(store adt.Store, av actorstypes.Version, builtinActors cid.Cid) ( case actorstypes.Version13: return make13(store, builtinActors) + case actorstypes.Version14: + return make14(store, builtinActors) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -152,5 +158,6 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + (&state14{}).Code(), } } diff --git a/chain/actors/builtin/system/v14.go b/chain/actors/builtin/system/v14.go new file mode 100644 index 00000000000..897b5f6fdc4 --- /dev/null +++ b/chain/actors/builtin/system/v14.go @@ -0,0 +1,72 @@ +package system + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + system14 "github.com/filecoin-project/go-state-types/builtin/v14/system" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store adt.Store, builtinActors cid.Cid) (State, error) { + out := state14{store: store} + out.State = system14.State{ + BuiltinActors: builtinActors, + } 
+ return &out, nil +} + +type state14 struct { + system14.State + store adt.Store +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) GetBuiltinActors() cid.Cid { + + return s.State.BuiltinActors + +} + +func (s *state14) SetBuiltinActors(c cid.Cid) error { + + s.State.BuiltinActors = c + return nil + +} + +func (s *state14) ActorKey() string { + return manifest.SystemKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/verifreg/v14.go b/chain/actors/builtin/verifreg/v14.go new file mode 100644 index 00000000000..6bfa82be571 --- /dev/null +++ b/chain/actors/builtin/verifreg/v14.go @@ -0,0 +1,196 @@ +package verifreg + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + builtin14 "github.com/filecoin-project/go-state-types/builtin" + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + verifreg14 "github.com/filecoin-project/go-state-types/builtin/v14/verifreg" + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state14)(nil) + +func load14(store adt.Store, root cid.Cid) (State, error) { + out := state14{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make14(store 
adt.Store, rootKeyAddress address.Address) (State, error) { + out := state14{store: store} + + s, err := verifreg14.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state14 struct { + verifreg14.State + store adt.Store +} + +func (s *state14) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state14) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return false, big.Zero(), xerrors.Errorf("unsupported in actors v14") + +} + +func (s *state14) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version14, s.verifiers, addr) +} + +func (s *state14) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version14, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state14) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version14, s.verifiers, cb) +} + +func (s *state14) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return xerrors.Errorf("unsupported in actors v14") + +} + +func (s *state14) verifiedClients() (adt.Map, error) { + + return nil, xerrors.Errorf("unsupported in actors v14") + +} + +func (s *state14) verifiers() (adt.Map, error) { + return adt14.AsMap(s.store, s.Verifiers, builtin14.DefaultHamtBitwidth) +} + +func (s *state14) removeDataCapProposalIDs() (adt.Map, error) { + return adt14.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin14.DefaultHamtBitwidth) +} + +func (s *state14) GetState() interface{} { + return &s.State +} + +func (s *state14) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*Allocation, bool, error) { + + alloc, ok, err := s.FindAllocation(s.store, clientIdAddr, 
verifreg14.AllocationId(allocationId)) + return (*Allocation)(alloc), ok, err +} + +func (s *state14) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) { + + v14Map, err := s.LoadAllocationsToMap(s.store, clientIdAddr) + + retMap := make(map[AllocationId]Allocation, len(v14Map)) + for k, v := range v14Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + +func (s *state14) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v14Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v14Map)) + for k, v := range v14Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + +func (s *state14) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { + + claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg14.ClaimId(claimId)) + return (*Claim)(claim), ok, err + +} + +func (s *state14) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) { + + v14Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[ClaimId]Claim, len(v14Map)) + for k, v := range v14Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + +func (s *state14) GetAllClaims() (map[ClaimId]Claim, error) { + + v14Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v14Map)) + for k, v := range v14Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + +func (s *state14) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + v14Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[abi.SectorNumber][]ClaimId) + for k, v := range v14Map { + claims, ok := retMap[v.Sector] + if !ok { + retMap[v.Sector] = []ClaimId{ClaimId(k)} + } else { + retMap[v.Sector] = append(claims, ClaimId(k)) + } + } + + return retMap, err + +} + +func (s *state14) ActorKey() string { + return 
manifest.VerifregKey +} + +func (s *state14) ActorVersion() actorstypes.Version { + return actorstypes.Version14 +} + +func (s *state14) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index 2d66d90282d..97976852fc6 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin13.VerifiedRegistryActorAddr - Methods = builtin13.MethodsVerifiedRegistry + Address = builtin14.VerifiedRegistryActorAddr + Methods = builtin14.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -55,6 +55,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version13: return load13(store, act.Head) + case actorstypes.Version14: + return load14(store, act.Head) + } } @@ -128,6 +131,9 @@ func MakeState(store adt.Store, av actorstypes.Version, rootKeyAddress address.A case actorstypes.Version13: return make13(store, rootKeyAddress) + case actorstypes.Version14: + return make14(store, rootKeyAddress) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -170,6 +176,7 @@ func AllCodes() []cid.Cid { (&state11{}).Code(), (&state12{}).Code(), (&state13{}).Code(), + 
(&state14{}).Code(), } } diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index b8d23903c44..6d74d72d846 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -10,6 +10,7 @@ import ( builtin11 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin" builtin13 "github.com/filecoin-project/go-state-types/builtin" + builtin14 "github.com/filecoin-project/go-state-types/builtin" builtin8 "github.com/filecoin-project/go-state-types/builtin" builtin9 "github.com/filecoin-project/go-state-types/builtin" market10 "github.com/filecoin-project/go-state-types/builtin/v10/market" @@ -23,8 +24,11 @@ import ( verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg" market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" - paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + market14 "github.com/filecoin-project/go-state-types/builtin/v14/market" + miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner" + paych14 "github.com/filecoin-project/go-state-types/builtin/v14/paych" + verifreg14 "github.com/filecoin-project/go-state-types/builtin/v14/verifreg" market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" @@ -63,14 +67,14 @@ import ( ) const ( - ChainFinality = miner13.ChainFinality + ChainFinality = miner14.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych13.SettleDelay - MaxPreCommitRandomnessLookback = builtin13.EpochsInDay + SealRandomnessLookback + PaychSettleDelay = paych14.SettleDelay + MaxPreCommitRandomnessLookback = builtin14.EpochsInDay + 
SealRandomnessLookback ) var ( - MarketDefaultAllocationTermBuffer = market13.MarketDefaultAllocationTermBuffer + MarketDefaultAllocationTermBuffer = market14.MarketDefaultAllocationTermBuffer ) // SetSupportedProofTypes sets supported proof types, across all actor versions. @@ -187,11 +191,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { miner13.PreCommitChallengeDelay = delay + miner14.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner13.PreCommitChallengeDelay + return miner14.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must @@ -249,6 +255,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { policy.ConsensusMinerMinPower = p } + for _, policy := range builtin14.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. 
This should @@ -281,6 +291,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg13.MinVerifiedDealSize = size + verifreg14.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { @@ -338,6 +350,10 @@ func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProo return miner13.MaxProveCommitDuration[t], nil + case actorstypes.Version14: + + return miner14.MaxProveCommitDuration[t], nil + default: return 0, xerrors.Errorf("unsupported actors version") } @@ -408,6 +424,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) { Denominator: denom, } + market14.ProviderCollateralSupplyTarget = builtin14.BigFrac{ + Numerator: num, + Denominator: denom, + } + } func DealProviderCollateralBounds( @@ -486,13 +507,18 @@ func DealProviderCollateralBounds( min, max := market13.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) return min, max, nil + case actorstypes.Version14: + + min, max := market14.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + default: return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { - return market13.DealDurationBounds(pieceSize) + return market14.DealDurationBounds(pieceSize) } // Sets the challenge window and scales the proving period to match (such that @@ -582,6 +608,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { // scale it if we're scaling the challenge period. miner13.WPoStDisputeWindow = period * 30 + miner14.WPoStChallengeWindow = period + miner14.WPoStProvingPeriod = period * abi.ChainEpoch(miner14.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner14.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -639,6 +672,9 @@ func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) case actorstypes.Version13: return miner13.MaxSectorExpirationExtension, nil + case actorstypes.Version14: + return miner14.MaxSectorExpirationExtension, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -646,11 +682,11 @@ func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) } func GetMinSectorExpiration() abi.ChainEpoch { - return miner13.MinSectorExpiration + return miner14.MinSectorExpiration } func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin13.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin14.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } @@ -659,7 +695,7 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e return 0, err } - return min(miner13.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil + return min(miner14.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil } func GetDefaultAggregationProof() abi.RegisteredAggregationProof { @@ -671,7 +707,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } - return builtin13.SealProofPoliciesV11[proof].SectorMaxLifetime + return builtin14.SealProofPoliciesV11[proof].SectorMaxLifetime } func GetAddressedSectorsMax(nwVer network.Version) (int, error) { @@ -720,6 +756,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) { case actorstypes.Version13: return miner13.AddressedSectorsMax, nil + case actorstypes.Version14: + return miner14.AddressedSectorsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -785,6 +824,10 @@ func 
GetDeclarationsMax(nwVer network.Version) (int, error) { return miner13.DeclarationsMax, nil + case actorstypes.Version14: + + return miner14.DeclarationsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -849,6 +892,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba return miner13.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + case actorstypes.Version14: + + return miner14.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } @@ -913,6 +960,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base return miner13.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + case actorstypes.Version14: + + return miner14.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } diff --git a/chain/actors/version.go b/chain/actors/version.go index 8d84bbc1d0c..b67a4ef2166 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -14,9 +14,9 @@ const ({{range .actorVersions}} /* inline-gen start */ -var LatestVersion = 13 +var LatestVersion = 14 -var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} +var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} const ( Version0 Version = 0 @@ -32,6 +32,7 @@ const ( Version11 Version = 11 Version12 Version = 12 Version13 Version = 13 + Version14 Version = 14 ) /* inline-gen end */ diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index 852a1a31ce5..1d781b0570e 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -97,7 +97,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes if err != nil { return nil, xerrors.Errorf("could not create http drand client: %w", err) } - hc.(DrandHTTPClient).SetUserAgent("drand-client-lotus/" + build.BuildVersion) + 
hc.(DrandHTTPClient).SetUserAgent("drand-client-lotus/" + build.NodeBuildVersion) clients = append(clients, hc) } diff --git a/chain/consensus/common.go b/chain/consensus/common.go index 8fee0d4c2f1..f679b92b682 100644 --- a/chain/consensus/common.go +++ b/chain/consensus/common.go @@ -29,6 +29,7 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/lib/async" "github.com/filecoin-project/lotus/metrics" @@ -131,6 +132,18 @@ func CommonBlkChecks(ctx context.Context, sm *stmgr.StateManager, cs *store.Chai } } +func IsValidEthTxForSending(nv network.Version, smsg *types.SignedMessage) bool { + ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(smsg) + if err != nil { + return false + } + + if nv < network.Version23 && ethTx.Type() != ethtypes.EIP1559TxType { + return false + } + return true +} + func IsValidForSending(nv network.Version, act *types.Actor) bool { // Before nv18 (Hygge), we only supported built-in account actors as senders. 
// @@ -276,6 +289,10 @@ func checkBlockMessages(ctx context.Context, sm *stmgr.StateManager, cs *store.C return xerrors.Errorf("block had invalid signed message at index %d: %w", i, err) } + if m.Signature.Type == crypto.SigTypeDelegated && !IsValidEthTxForSending(nv, m) { + return xerrors.Errorf("network version should be atleast NV23 for sending legacy ETH transactions; but current network version is %d", nv) + } + if err := checkMsg(m); err != nil { return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) } diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go index 78369ec20b4..a5e82a57ffe 100644 --- a/chain/consensus/compute_state.go +++ b/chain/consensus/compute_state.go @@ -54,6 +54,7 @@ func NewActorRegistry() *vm.ActorRegistry { inv.Register(actorstypes.Version11, vm.ActorsVersionPredicate(actorstypes.Version11), builtin.MakeRegistry(actorstypes.Version11)) inv.Register(actorstypes.Version12, vm.ActorsVersionPredicate(actorstypes.Version12), builtin.MakeRegistry(actorstypes.Version12)) inv.Register(actorstypes.Version13, vm.ActorsVersionPredicate(actorstypes.Version13), builtin.MakeRegistry(actorstypes.Version13)) + inv.Register(actorstypes.Version14, vm.ActorsVersionPredicate(actorstypes.Version14), builtin.MakeRegistry(actorstypes.Version14)) return inv } diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index eaa4f3b200d..2a003e82e32 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -27,6 +27,7 @@ import ( nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration" system12 "github.com/filecoin-project/go-state-types/builtin/v12/system" nv22 "github.com/filecoin-project/go-state-types/builtin/v13/migration" + nv23 "github.com/filecoin-project/go-state-types/builtin/v14/migration" nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration" "github.com/filecoin-project/go-state-types/manifest" 
"github.com/filecoin-project/go-state-types/migration" @@ -302,6 +303,17 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { Height: build.UpgradeCalibrationDragonFixHeight, Network: network.Version22, Migration: upgradeActorsV13VerifregFix(calibnetv13BuggyVerifregCID1, calibnetv13CorrectManifestCID1), + }, { + Height: build.UpgradeAussieHeight, + Network: network.Version23, + Migration: UpgradeActorsV14, + PreMigrations: []stmgr.PreMigration{{ + PreMigration: PreUpgradeActorsV14, + StartWithin: 120, + DontStartWithin: 15, + StopWithin: 10, + }}, + Expensive: true, }, } @@ -2468,6 +2480,108 @@ func upgradeActorsV13VerifregFix(oldBuggyVerifregCID, newManifestCID cid.Cid) fu } } +func PreUpgradeActorsV14(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch) + if err != nil { + return xerrors.Errorf("error getting lookback ts for premigration: %w", err) + } + + config := migration.Config{ + MaxWorkers: uint(workerCount), + ProgressLogPeriod: time.Minute * 5, + } + + _, err = upgradeActorsV14Common(ctx, sm, cache, lbRoot, epoch, lbts, config) + return err +} + +func UpgradeActorsV14(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 2. 
+ workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + config := migration.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + newRoot, err := upgradeActorsV14Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v14 state: %w", err) + } + return newRoot, nil +} + +func upgradeActorsV14Common( + ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config migration.Config, +) (cid.Cid, error) { + writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4) + adtStore := store.ActorStore(ctx, writeStore) + // ensure that the manifest is loaded in the blockstore + if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version14); err != nil { + return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err) + } + + // Load the state root. + var stateRoot types.StateRoot + if err := adtStore.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion5 { + return cid.Undef, xerrors.Errorf( + "expected state root version 5 for actors v14 upgrade, got %d", + stateRoot.Version, + ) + } + + manifest, ok := actors.GetManifest(actorstypes.Version14) + if !ok { + return cid.Undef, xerrors.Errorf("no manifest CID for v14 upgrade") + } + + // Perform the migration + newHamtRoot, err := nv23.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config, + migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v14: %w", err) + } + + // Persist the result. 
+ newRoot, err := adtStore.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion5, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persists the new tree and shuts down the flush worker + if err := writeStore.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err) + } + + if err := writeStore.Shutdown(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err) + } + + return newRoot, nil +} + //////////////////// // Example upgrade function if upgrade requires only code changes diff --git a/chain/consensus/signatures.go b/chain/consensus/signatures.go index cb0e229a85b..4343bcbbe53 100644 --- a/chain/consensus/signatures.go +++ b/chain/consensus/signatures.go @@ -18,34 +18,42 @@ import ( // must be recognized by the registered verifier for the signature type. func AuthenticateMessage(msg *types.SignedMessage, signer address.Address) error { var digest []byte + signatureType := msg.Signature.Type + signatureCopy := msg.Signature - typ := msg.Signature.Type - switch typ { + switch signatureType { case crypto.SigTypeDelegated: - txArgs, err := ethtypes.EthTxArgsFromUnsignedEthMessage(&msg.Message) + signatureCopy.Data = make([]byte, len(msg.Signature.Data)) + copy(signatureCopy.Data, msg.Signature.Data) + ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) if err != nil { - return xerrors.Errorf("failed to reconstruct eth transaction: %w", err) + return xerrors.Errorf("failed to reconstruct Ethereum transaction: %w", err) } - roundTripMsg, err := txArgs.ToUnsignedMessage(msg.Message.From) + + filecoinMsg, err := ethTx.ToUnsignedFilecoinMessage(msg.Message.From) if err != nil { - return xerrors.Errorf("failed to reconstruct filecoin msg: %w", err) + return xerrors.Errorf("failed to reconstruct Filecoin message: %w", err) } - if !msg.Message.Equals(roundTripMsg) { - 
return xerrors.New("ethereum tx failed to roundtrip") + if !msg.Message.Equals(filecoinMsg) { + return xerrors.New("Ethereum transaction roundtrip mismatch") } - rlpEncodedMsg, err := txArgs.ToRlpUnsignedMsg() + rlpEncodedMsg, err := ethTx.ToRlpUnsignedMsg() if err != nil { - return xerrors.Errorf("failed to repack eth rlp message: %w", err) + return xerrors.Errorf("failed to encode RLP message: %w", err) } digest = rlpEncodedMsg + signatureCopy.Data, err = ethTx.ToVerifiableSignature(signatureCopy.Data) + if err != nil { + return xerrors.Errorf("failed to verify signature: %w", err) + } default: digest = msg.Message.Cid().Bytes() } - if err := sigs.Verify(&msg.Signature, signer, digest); err != nil { - return xerrors.Errorf("message %s has invalid signature (type %d): %w", msg.Cid(), typ, err) + if err := sigs.Verify(&signatureCopy, signer, digest); err != nil { + return xerrors.Errorf("invalid signature for message %s (type %d): %w", msg.Cid(), signatureType, err) } return nil } diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index 0accc551ab4..fa17d235ea9 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -57,7 +57,7 @@ var _ Filter = (*eventFilter)(nil) type CollectedEvent struct { Entries []types.EventEntry EmitterAddr address.Address // address of emitter - EventIdx int // index of the event within the list of emitted events + EventIdx int // index of the event within the list of emitted events in a given tipset Reverted bool Height abi.ChainEpoch TipSetKey types.TipSetKey // tipset that contained the message @@ -94,8 +94,11 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever if err != nil { return xerrors.Errorf("load executed messages: %w", err) } + + eventCount := 0 + for msgIdx, em := range ems { - for evIdx, ev := range em.Events() { + for _, ev := range em.Events() { // lookup address corresponding to the actor id addr, found := addressLookups[ev.Emitter] if !found { 
@@ -119,7 +122,7 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever cev := &CollectedEvent{ Entries: ev.Entries, EmitterAddr: addr, - EventIdx: evIdx, + EventIdx: eventCount, Reverted: revert, Height: te.msgTs.Height(), TipSetKey: te.msgTs.Key(), @@ -141,6 +144,7 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever } f.collected = append(f.collected, cev) f.mu.Unlock() + eventCount++ } } diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index 9a8f8bca4c5..5ebb8fb580c 100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -26,12 +26,15 @@ var pragmas = []string{ "PRAGMA temp_store = memory", "PRAGMA mmap_size = 30000000000", "PRAGMA page_size = 32768", - "PRAGMA auto_vacuum = NONE", // not useful until we implement GC + "PRAGMA auto_vacuum = NONE", "PRAGMA automatic_index = OFF", "PRAGMA journal_mode = WAL", - "PRAGMA read_uncommitted = ON", + "PRAGMA wal_autocheckpoint = 256", // checkpoint @ 256 pages + "PRAGMA journal_size_limit = 0", // always reset journal and wal files } +// Any changes to this schema should be matched for the `lotus-shed indexes backfill-events` command + var ddls = []string{ `CREATE TABLE IF NOT EXISTS event ( id INTEGER PRIMARY KEY, @@ -72,6 +75,7 @@ var ddls = []string{ `INSERT OR IGNORE INTO _meta (version) VALUES (2)`, `INSERT OR IGNORE INTO _meta (version) VALUES (3)`, `INSERT OR IGNORE INTO _meta (version) VALUES (4)`, + `INSERT OR IGNORE INTO _meta (version) VALUES (5)`, } var ( @@ -79,7 +83,7 @@ var ( ) const ( - schemaVersion = 4 + schemaVersion = 5 eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? 
AND message_index=?` insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)` @@ -365,9 +369,42 @@ func (ei *EventIndex) migrateToVersion4(ctx context.Context) error { return xerrors.Errorf("commit transaction: %w", err) } + log.Infof("Successfully migrated event index from version 3 to version 4 in %s", time.Since(now)) + return nil +} + +func (ei *EventIndex) migrateToVersion5(ctx context.Context) error { + now := time.Now() + + tx, err := ei.db.BeginTx(ctx, nil) + if err != nil { + return xerrors.Errorf("begin transaction: %w", err) + } + defer func() { _ = tx.Rollback() }() + + stmtEventIndexUpdate, err := tx.PrepareContext(ctx, "UPDATE event SET event_index = (SELECT COUNT(*) FROM event e2 WHERE e2.tipset_key_cid = event.tipset_key_cid AND e2.id <= event.id) - 1") + if err != nil { + return xerrors.Errorf("prepare stmtEventIndexUpdate: %w", err) + } + + _, err = stmtEventIndexUpdate.ExecContext(ctx) + if err != nil { + return xerrors.Errorf("update event index: %w", err) + } + + _, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (5)") + if err != nil { + return xerrors.Errorf("increment _meta version: %w", err) + } + + err = tx.Commit() + if err != nil { + return xerrors.Errorf("commit transaction: %w", err) + } + ei.vacuumDBAndCheckpointWAL(ctx) - log.Infof("Successfully migrated event index from version 3 to version 4 in %s", time.Since(now)) + log.Infof("Successfully migrated event index from version 4 to version 5 in %s", time.Since(now)) return nil } @@ -402,6 +439,9 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor eventIndex := EventIndex{db: db} q, err := db.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';") + if q != nil { + defer func() { _ = q.Close() }() + } if errors.Is(err, sql.ErrNoRows) || !q.Next() { // empty database, create the 
schema for _, ddl := range ddls { @@ -452,6 +492,16 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor version = 4 } + if version == 4 { + log.Infof("Upgrading event index from version 4 to version 5") + err = eventIndex.migrateToVersion5(ctx) + if err != nil { + _ = db.Close() + return nil, xerrors.Errorf("could not migrate event index schema from version 4 to version 5: %w", err) + } + version = 5 + } + if version != schemaVersion { _ = db.Close() return nil, xerrors.Errorf("invalid database version: got %d, expected %d", version, schemaVersion) @@ -475,7 +525,7 @@ func (ei *EventIndex) Close() error { } func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { - tx, err := ei.db.Begin() + tx, err := ei.db.BeginTx(ctx, nil) if err != nil { return xerrors.Errorf("begin transaction: %w", err) } @@ -505,10 +555,11 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever return xerrors.Errorf("load executed messages: %w", err) } + eventCount := 0 // iterate over all executed messages in this tipset and insert them into the database if they // don't exist, otherwise mark them as not reverted for msgIdx, em := range ems { - for evIdx, ev := range em.Events() { + for _, ev := range em.Events() { addr, found := addressLookups[ev.Emitter] if !found { var ok bool @@ -532,7 +583,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever te.msgTs.Key().Bytes(), // tipset_key tsKeyCid.Bytes(), // tipset_key_cid addr.Bytes(), // emitter_addr - evIdx, // event_index + eventCount, // event_index em.Message().Cid().Bytes(), // message_cid msgIdx, // message_index ).Scan(&entryID) @@ -547,7 +598,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever te.msgTs.Key().Bytes(), // tipset_key tsKeyCid.Bytes(), // tipset_key_cid 
addr.Bytes(), // emitter_addr - evIdx, // event_index + eventCount, // event_index em.Message().Cid().Bytes(), // message_cid msgIdx, // message_index false, // reverted @@ -582,7 +633,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever te.msgTs.Key().Bytes(), // tipset_key tsKeyCid.Bytes(), // tipset_key_cid addr.Bytes(), // emitter_addr - evIdx, // event_index + eventCount, // event_index em.Message().Cid().Bytes(), // message_cid msgIdx, // message_index ) @@ -600,6 +651,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever log.Warnf("restored %d events but expected only one to exist", rowsAffected) } } + eventCount++ } } @@ -695,6 +747,7 @@ func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, exclude if err != nil { return xerrors.Errorf("prepare prefill query: %w", err) } + defer func() { _ = stmt.Close() }() q, err := stmt.QueryContext(ctx, values...) if err != nil { @@ -703,6 +756,7 @@ func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, exclude } return xerrors.Errorf("exec prefill query: %w", err) } + defer func() { _ = q.Close() }() var ces []*CollectedEvent var currentID int64 = -1 @@ -791,7 +845,6 @@ func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, exclude Codec: row.codec, Value: row.value, }) - } if ce != nil { diff --git a/chain/market/fundmanager_test.go b/chain/market/fundmanager_test.go index d79afbc513b..fb359aa0767 100644 --- a/chain/market/fundmanager_test.go +++ b/chain/market/fundmanager_test.go @@ -294,7 +294,7 @@ func TestFundManagerReserveByWallet(t *testing.T) { checkAddMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2)) } -// TestFundManagerWithdrawal verifies that as many withdraw operations as +// TestFundManagerWithdrawalLimit verifies that as many withdraw operations as // possible are processed func TestFundManagerWithdrawalLimit(t *testing.T) { //stm: @MARKET_RESERVE_FUNDS_001, 
@MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001 diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index c8a2493faa6..c7437c99168 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -887,6 +887,11 @@ func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs nv := mp.api.StateNetworkVersion(ctx, epoch) // TODO: I'm not thrilled about depending on filcns here, but I prefer this to duplicating logic + + if m.Signature.Type == crypto.SigTypeDelegated && !consensus.IsValidEthTxForSending(nv, m) { + return false, xerrors.Errorf("network version should be atleast NV23 for sending legacy ETH transactions; but current network version is %d", nv) + } + if !consensus.IsValidForSending(nv, senderAct) { return false, xerrors.Errorf("sender actor %s is not a valid top-level sender", m.Message.From) } diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go index cd31a3b739e..8a00bbc5c70 100644 --- a/chain/messagesigner/messagesigner.go +++ b/chain/messagesigner/messagesigner.go @@ -196,7 +196,7 @@ func (ms *MessageSigner) dstoreKey(addr address.Address) datastore.Key { func SigningBytes(msg *types.Message, sigType address.Protocol) ([]byte, error) { if sigType == address.Delegated { - txArgs, err := ethtypes.EthTxArgsFromUnsignedEthMessage(msg) + txArgs, err := ethtypes.Eth1559TxArgsFromUnsignedFilecoinMessage(msg) if err != nil { return nil, xerrors.Errorf("failed to reconstruct eth transaction: %w", err) } diff --git a/chain/state/statetree.go b/chain/state/statetree.go index 03cd98d95ad..38a713c59a9 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -156,7 +156,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { case network.Version13, network.Version14, network.Version15, network.Version16, network.Version17: return types.StateTreeVersion4, nil - case network.Version18, network.Version19, 
network.Version20, network.Version21, network.Version22: + case network.Version18, network.Version19, network.Version20, network.Version21, network.Version22, network.Version23: return types.StateTreeVersion5, nil default: diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 61056528f11..7f2a57a6112 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -25,6 +25,14 @@ import ( "github.com/filecoin-project/lotus/chain/vm" ) +type execMessageStrategy int + +const ( + execNoMessages execMessageStrategy = iota // apply no prior or current tipset messages + execAllMessages // apply all prior and current tipset messages + execSameSenderMessages // apply all prior messages and any current tipset messages from the same sender +) + var ErrExpensiveFork = errors.New("refusing explicit call due to state fork at epoch") // Call applies the given message to the given tipset's parent state, at the epoch following the @@ -48,12 +56,24 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. msg.Value = types.NewInt(0) } - return sm.callInternal(ctx, msg, nil, ts, cid.Undef, sm.GetNetworkVersion, false, false) + return sm.callInternal(ctx, msg, nil, ts, cid.Undef, sm.GetNetworkVersion, false, execSameSenderMessages) +} + +// ApplyOnStateWithGas applies the given message on top of the given state root with gas tracing enabled +func (sm *StateManager) ApplyOnStateWithGas(ctx context.Context, stateCid cid.Cid, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) { + return sm.callInternal(ctx, msg, nil, ts, stateCid, sm.GetNetworkVersion, true, execNoMessages) } // CallWithGas calculates the state for a given tipset, and then applies the given message on top of that state. 
func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet, applyTsMessages bool) (*api.InvocResult, error) { - return sm.callInternal(ctx, msg, priorMsgs, ts, cid.Undef, sm.GetNetworkVersion, true, applyTsMessages) + var strategy execMessageStrategy + if applyTsMessages { + strategy = execAllMessages + } else { + strategy = execSameSenderMessages + } + + return sm.callInternal(ctx, msg, priorMsgs, ts, cid.Undef, sm.GetNetworkVersion, true, strategy) } // CallAtStateAndVersion allows you to specify a message to execute on the given stateCid and network version. @@ -64,14 +84,14 @@ func (sm *StateManager) CallAtStateAndVersion(ctx context.Context, msg *types.Me nvGetter := func(context.Context, abi.ChainEpoch) network.Version { return v } - - return sm.callInternal(ctx, msg, nil, nil, stateCid, nvGetter, true, false) + return sm.callInternal(ctx, msg, nil, nil, stateCid, nvGetter, true, execSameSenderMessages) } // - If no tipset is specified, the first tipset without an expensive migration or one in its parent is used. // - If executing a message at a given tipset or its parent would trigger an expensive migration, the call will // fail with ErrExpensiveFork. 
-func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet, stateCid cid.Cid, nvGetter rand.NetworkVersionGetter, checkGas, applyTsMessages bool) (*api.InvocResult, error) { +func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet, stateCid cid.Cid, + nvGetter rand.NetworkVersionGetter, checkGas bool, strategy execMessageStrategy) (*api.InvocResult, error) { ctx, span := trace.StartSpan(ctx, "statemanager.callInternal") defer span.End() @@ -95,7 +115,7 @@ func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, pr return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } // Checks for expensive forks from the parents to the tipset, including nil tipsets - if !sm.hasExpensiveForkBetween(pts.Height(), ts.Height()+1) { + if !sm.HasExpensiveForkBetween(pts.Height(), ts.Height()+1) { break } @@ -106,7 +126,7 @@ func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, pr if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } - if sm.hasExpensiveForkBetween(pts.Height(), ts.Height()+1) { + if sm.HasExpensiveForkBetween(pts.Height(), ts.Height()+1) { return nil, ErrExpensiveFork } } @@ -117,24 +137,6 @@ func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, pr if stateCid == cid.Undef { stateCid = ts.ParentState() } - tsMsgs, err := sm.cs.MessagesForTipset(ctx, ts) - if err != nil { - return nil, xerrors.Errorf("failed to lookup messages for parent tipset: %w", err) - } - - if applyTsMessages { - priorMsgs = append(tsMsgs, priorMsgs...) 
- } else { - var filteredTsMsgs []types.ChainMsg - for _, tsMsg := range tsMsgs { - //TODO we should technically be normalizing the filecoin address of from when we compare here - if tsMsg.VMMessage().From == msg.VMMessage().From { - filteredTsMsgs = append(filteredTsMsgs, tsMsg) - } - } - priorMsgs = append(filteredTsMsgs, priorMsgs...) - } - // Technically, the tipset we're passing in here should be ts+1, but that may not exist. stateCid, err = sm.HandleStateForks(ctx, stateCid, ts.Height(), nil, ts) if err != nil { @@ -169,18 +171,40 @@ func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, pr if err != nil { return nil, xerrors.Errorf("failed to set up vm: %w", err) } - for i, m := range priorMsgs { - _, err = vmi.ApplyMessage(ctx, m) + + switch strategy { + case execNoMessages: + // Do nothing + case execAllMessages, execSameSenderMessages: + tsMsgs, err := sm.cs.MessagesForTipset(ctx, ts) if err != nil { - return nil, xerrors.Errorf("applying prior message (%d, %s): %w", i, m.Cid(), err) + return nil, xerrors.Errorf("failed to lookup messages for parent tipset: %w", err) + } + if strategy == execAllMessages { + priorMsgs = append(tsMsgs, priorMsgs...) + } else if strategy == execSameSenderMessages { + var filteredTsMsgs []types.ChainMsg + for _, tsMsg := range tsMsgs { + //TODO we should technically be normalizing the filecoin address of from when we compare here + if tsMsg.VMMessage().From == msg.VMMessage().From { + filteredTsMsgs = append(filteredTsMsgs, tsMsg) + } + } + priorMsgs = append(filteredTsMsgs, priorMsgs...) 
+ } + for i, m := range priorMsgs { + _, err = vmi.ApplyMessage(ctx, m) + if err != nil { + return nil, xerrors.Errorf("applying prior message (%d, %s): %w", i, m.Cid(), err) + } } - } - // We flush to get the VM's view of the state tree after applying the above messages - // This is needed to get the correct nonce from the actor state to match the VM - stateCid, err = vmi.Flush(ctx) - if err != nil { - return nil, xerrors.Errorf("flushing vm: %w", err) + // We flush to get the VM's view of the state tree after applying the above messages + // This is needed to get the correct nonce from the actor state to match the VM + stateCid, err = vmi.Flush(ctx) + if err != nil { + return nil, xerrors.Errorf("flushing vm: %w", err) + } } stTree, err := state.LoadStateTree(cbor.NewCborStore(buffStore), stateCid) diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 9a236196187..c6c513e3a26 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -227,7 +227,7 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig // Returns true executing tipsets between the specified heights would trigger an expensive // migration. NOTE: migrations occurring _at_ the target height are not included, as they're // executed _after_ the target height. 
-func (sm *StateManager) hasExpensiveForkBetween(parent, height abi.ChainEpoch) bool { +func (sm *StateManager) HasExpensiveForkBetween(parent, height abi.ChainEpoch) bool { for h := parent; h < height; h++ { if _, ok := sm.expensiveUpgrades[h]; ok { return true diff --git a/chain/stmgr/supply.go b/chain/stmgr/supply.go index 1aea5cc65a4..8ee369750e8 100644 --- a/chain/stmgr/supply.go +++ b/chain/stmgr/supply.go @@ -10,6 +10,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" "github.com/filecoin-project/lotus/api" @@ -303,7 +304,10 @@ func getFilPowerLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmoun return pst.TotalLocked() } -func GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { +func GetFilLocked(ctx context.Context, st *state.StateTree, nv network.Version) (abi.TokenAmount, error) { + if nv >= network.Version23 { + return getFilPowerLocked(ctx, st) + } filMarketLocked, err := getFilMarketLocked(ctx, st) if err != nil { @@ -337,6 +341,7 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C } func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { + nv := sm.GetNetworkVersion(ctx, height) filVested, err := sm.GetFilVested(ctx, height) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err) @@ -360,7 +365,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filBurnt: %w", err) } - filLocked, err := GetFilLocked(ctx, st) + filLocked, err := GetFilLocked(ctx, st, nv) if err != nil { return api.CirculatingSupply{}, 
xerrors.Errorf("failed to calculate filLocked: %w", err) } @@ -387,6 +392,8 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { circ := big.Zero() unCirc := big.Zero() + nv := sm.GetNetworkVersion(ctx, height) + err := st.ForEach(func(a address.Address, actor *types.Actor) error { // this can be a lengthy operation, we need to cancel early when // the context is cancelled to avoid resource exhaustion @@ -415,19 +422,23 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha unCirc = big.Add(unCirc, actor.Balance) case a == market.Address: - mst, err := market.Load(sm.cs.ActorStore(ctx), actor) - if err != nil { - return err - } - - lb, err := mst.TotalLocked() - if err != nil { - return err + if nv >= network.Version23 { + circ = big.Add(circ, actor.Balance) + } else { + mst, err := market.Load(sm.cs.ActorStore(ctx), actor) + if err != nil { + return err + } + + lb, err := mst.TotalLocked() + if err != nil { + return err + } + + circ = big.Add(circ, big.Sub(actor.Balance, lb)) + unCirc = big.Add(unCirc, lb) } - circ = big.Add(circ, big.Sub(actor.Balance, lb)) - unCirc = big.Add(unCirc, lb) - case builtin.IsAccountActor(actor.Code) || builtin.IsPaymentChannelActor(actor.Code) || builtin.IsEthAccountActor(actor.Code) || diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index b50ddc46779..42d270a95f5 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -16,6 +16,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/peer" + mh "github.com/multiformats/go-multihash" "go.opencensus.io/stats" "go.opencensus.io/tag" "golang.org/x/xerrors" @@ -30,16 +31,16 @@ import ( "github.com/filecoin-project/lotus/chain/sub/ratelimit" "github.com/filecoin-project/lotus/chain/types" 
"github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/node/impl/client" "github.com/filecoin-project/lotus/node/impl/full" ) var log = logging.Logger("sub") +var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31) var msgCidPrefix = cid.Prefix{ Version: 1, Codec: cid.DagCBOR, - MhType: client.DefaultHashFunction, + MhType: DefaultHashFunction, MhLength: 32, } diff --git a/chain/types/ethtypes/eth_1559_transactions.go b/chain/types/ethtypes/eth_1559_transactions.go new file mode 100644 index 00000000000..1af2598735e --- /dev/null +++ b/chain/types/ethtypes/eth_1559_transactions.go @@ -0,0 +1,342 @@ +package ethtypes + +import ( + "fmt" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + typescrypto "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" +) + +var _ EthTransaction = (*Eth1559TxArgs)(nil) + +type Eth1559TxArgs struct { + ChainID int `json:"chainId"` + Nonce int `json:"nonce"` + To *EthAddress `json:"to"` + Value big.Int `json:"value"` + MaxFeePerGas big.Int `json:"maxFeePerGas"` + MaxPriorityFeePerGas big.Int `json:"maxPriorityFeePerGas"` + GasLimit int `json:"gasLimit"` + Input []byte `json:"input"` + V big.Int `json:"v"` + R big.Int `json:"r"` + S big.Int `json:"s"` +} + +func (tx *Eth1559TxArgs) ToUnsignedFilecoinMessage(from address.Address) (*types.Message, error) { + if tx.ChainID != build.Eip155ChainId { + return nil, fmt.Errorf("invalid chain id: %d", tx.ChainID) + } + mi, err := getFilecoinMethodInfo(tx.To, tx.Input) + if err != nil { + return nil, xerrors.Errorf("failed to get method info: %w", err) + } + + return &types.Message{ + Version: 0, + To: mi.to, + From: from, + Nonce: uint64(tx.Nonce), + Value: tx.Value, + GasLimit: int64(tx.GasLimit), + GasFeeCap: tx.MaxFeePerGas, + GasPremium: tx.MaxPriorityFeePerGas, + Method: mi.method, + Params: mi.params, 
+ }, nil +} + +func (tx *Eth1559TxArgs) ToRlpUnsignedMsg() ([]byte, error) { + encoded, err := toRlpUnsignedMsg(tx) + if err != nil { + return nil, err + } + return append([]byte{EIP1559TxType}, encoded...), nil +} + +func (tx *Eth1559TxArgs) TxHash() (EthHash, error) { + rlp, err := tx.ToRlpSignedMsg() + if err != nil { + return EmptyEthHash, err + } + + return EthHashFromTxBytes(rlp), nil +} + +func (tx *Eth1559TxArgs) ToRlpSignedMsg() ([]byte, error) { + encoded, err := toRlpSignedMsg(tx, tx.V, tx.R, tx.S) + if err != nil { + return nil, err + } + return append([]byte{EIP1559TxType}, encoded...), nil +} + +func (tx *Eth1559TxArgs) Signature() (*typescrypto.Signature, error) { + r := tx.R.Int.Bytes() + s := tx.S.Int.Bytes() + v := tx.V.Int.Bytes() + + sig := append([]byte{}, padLeadingZeros(r, 32)...) + sig = append(sig, padLeadingZeros(s, 32)...) + if len(v) == 0 { + sig = append(sig, 0) + } else { + sig = append(sig, v[0]) + } + + if len(sig) != 65 { + return nil, xerrors.Errorf("signature is not 65 bytes") + } + return &typescrypto.Signature{ + Type: typescrypto.SigTypeDelegated, Data: sig, + }, nil +} + +func (tx *Eth1559TxArgs) Sender() (address.Address, error) { + return sender(tx) +} + +func (tx *Eth1559TxArgs) Type() int { + return EIP1559TxType +} + +func (tx *Eth1559TxArgs) ToVerifiableSignature(sig []byte) ([]byte, error) { + return sig, nil +} + +func (tx *Eth1559TxArgs) ToEthTx(smsg *types.SignedMessage) (EthTx, error) { + from, err := EthAddressFromFilecoinAddress(smsg.Message.From) + if err != nil { + return EthTx{}, xerrors.Errorf("sender was not an eth account") + } + hash, err := tx.TxHash() + if err != nil { + return EthTx{}, err + } + gasFeeCap := EthBigInt(tx.MaxFeePerGas) + gasPremium := EthBigInt(tx.MaxPriorityFeePerGas) + + ethTx := EthTx{ + ChainID: EthUint64(build.Eip155ChainId), + Type: EIP1559TxType, + Nonce: EthUint64(tx.Nonce), + Hash: hash, + To: tx.To, + Value: EthBigInt(tx.Value), + Input: tx.Input, + Gas: EthUint64(tx.GasLimit), 
+ MaxFeePerGas: &gasFeeCap, + MaxPriorityFeePerGas: &gasPremium, + From: from, + R: EthBigInt(tx.R), + S: EthBigInt(tx.S), + V: EthBigInt(tx.V), + } + + return ethTx, nil +} + +func (tx *Eth1559TxArgs) InitialiseSignature(sig typescrypto.Signature) error { + if sig.Type != typescrypto.SigTypeDelegated { + return xerrors.Errorf("RecoverSignature only supports Delegated signature") + } + + if len(sig.Data) != EthEIP1559TxSignatureLen { + return xerrors.Errorf("signature should be 65 bytes long, but got %d bytes", len(sig.Data)) + } + + r_, err := parseBigInt(sig.Data[0:32]) + if err != nil { + return xerrors.Errorf("cannot parse r into EthBigInt") + } + + s_, err := parseBigInt(sig.Data[32:64]) + if err != nil { + return xerrors.Errorf("cannot parse s into EthBigInt") + } + + v_, err := parseBigInt([]byte{sig.Data[64]}) + if err != nil { + return xerrors.Errorf("cannot parse v into EthBigInt") + } + + tx.R = r_ + tx.S = s_ + tx.V = v_ + + return nil +} + +func (tx *Eth1559TxArgs) packTxFields() ([]interface{}, error) { + chainId, err := formatInt(tx.ChainID) + if err != nil { + return nil, err + } + + nonce, err := formatInt(tx.Nonce) + if err != nil { + return nil, err + } + + maxPriorityFeePerGas, err := formatBigInt(tx.MaxPriorityFeePerGas) + if err != nil { + return nil, err + } + + maxFeePerGas, err := formatBigInt(tx.MaxFeePerGas) + if err != nil { + return nil, err + } + + gasLimit, err := formatInt(tx.GasLimit) + if err != nil { + return nil, err + } + + value, err := formatBigInt(tx.Value) + if err != nil { + return nil, err + } + + res := []interface{}{ + chainId, + nonce, + maxPriorityFeePerGas, + maxFeePerGas, + gasLimit, + formatEthAddr(tx.To), + value, + tx.Input, + []interface{}{}, // access list + } + return res, nil +} + +func parseEip1559Tx(data []byte) (*Eth1559TxArgs, error) { + if data[0] != EIP1559TxType { + return nil, xerrors.Errorf("not an EIP-1559 transaction: first byte is not %d", EIP1559TxType) + } + + d, err := DecodeRLP(data[1:]) + if 
err != nil { + return nil, err + } + decoded, ok := d.([]interface{}) + if !ok { + return nil, xerrors.Errorf("not an EIP-1559 transaction: decoded data is not a list") + } + + if len(decoded) != 12 { + return nil, xerrors.Errorf("not an EIP-1559 transaction: should have 12 elements in the rlp list") + } + + chainId, err := parseInt(decoded[0]) + if err != nil { + return nil, err + } + + nonce, err := parseInt(decoded[1]) + if err != nil { + return nil, err + } + + maxPriorityFeePerGas, err := parseBigInt(decoded[2]) + if err != nil { + return nil, err + } + + maxFeePerGas, err := parseBigInt(decoded[3]) + if err != nil { + return nil, err + } + + gasLimit, err := parseInt(decoded[4]) + if err != nil { + return nil, err + } + + to, err := parseEthAddr(decoded[5]) + if err != nil { + return nil, err + } + + value, err := parseBigInt(decoded[6]) + if err != nil { + return nil, err + } + + input, err := parseBytes(decoded[7]) + if err != nil { + return nil, err + } + + accessList, ok := decoded[8].([]interface{}) + if !ok || (ok && len(accessList) != 0) { + return nil, xerrors.Errorf("access list should be an empty list") + } + + r, err := parseBigInt(decoded[10]) + if err != nil { + return nil, err + } + + s, err := parseBigInt(decoded[11]) + if err != nil { + return nil, err + } + + v, err := parseBigInt(decoded[9]) + if err != nil { + return nil, err + } + + // EIP-1559 and EIP-2930 transactions only support 0 or 1 for v + // Legacy and EIP-155 transactions support other values + // https://github.com/ethers-io/ethers.js/blob/56fabe987bb8c1e4891fdf1e5d3fe8a4c0471751/packages/transactions/src.ts/index.ts#L333 + if !v.Equals(big.NewInt(0)) && !v.Equals(big.NewInt(1)) { + return nil, xerrors.Errorf("EIP-1559 transactions only support 0 or 1 for v") + } + + args := Eth1559TxArgs{ + ChainID: chainId, + Nonce: nonce, + To: to, + MaxPriorityFeePerGas: maxPriorityFeePerGas, + MaxFeePerGas: maxFeePerGas, + GasLimit: gasLimit, + Value: value, + Input: input, + V: v, + R: r, 
+ S: s, + } + return &args, nil +} + +func Eth1559TxArgsFromUnsignedFilecoinMessage(msg *types.Message) (*Eth1559TxArgs, error) { + if msg.Version != 0 { + return nil, fmt.Errorf("unsupported msg version: %d", msg.Version) + } + + params, to, err := getEthParamsAndRecipient(msg) + if err != nil { + return nil, fmt.Errorf("failed to get eth params and recipient: %w", err) + } + + return &Eth1559TxArgs{ + ChainID: build.Eip155ChainId, + Nonce: int(msg.Nonce), + To: to, + Value: msg.Value, + Input: params, + MaxFeePerGas: msg.GasFeeCap, + MaxPriorityFeePerGas: msg.GasPremium, + GasLimit: int(msg.GasLimit), + }, nil +} diff --git a/chain/types/ethtypes/eth_1559_transactions_test.go b/chain/types/ethtypes/eth_1559_transactions_test.go new file mode 100644 index 00000000000..69835925fef --- /dev/null +++ b/chain/types/ethtypes/eth_1559_transactions_test.go @@ -0,0 +1,241 @@ +package ethtypes + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/crypto/sha3" + + "github.com/filecoin-project/go-address" + gocrypto "github.com/filecoin-project/go-crypto" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/builtin/v10/evm" + init10 "github.com/filecoin-project/go-state-types/builtin/v10/init" + crypto1 "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/delegated" +) + +type TxTestcase struct { + TxJSON string + NosigTx string + Input EthBytes + Output Eth1559TxArgs +} + +func TestEIP1559TxArgs(t *testing.T) { + testcases, err := prepareTxTestcases() + require.Nil(t, err) + require.NotEmpty(t, testcases) + + for i, tc := range testcases { + comment := fmt.Sprintf("case %d: \n%s\n%s", i, tc.TxJSON, 
hex.EncodeToString(tc.Input)) + + // parse txargs + txArgs, err := parseEip1559Tx(tc.Input) + require.NoError(t, err, comment) + + msgRecovered, err := txArgs.ToRlpUnsignedMsg() + require.NoError(t, err, comment) + require.Equal(t, tc.NosigTx, "0x"+hex.EncodeToString(msgRecovered), comment) + + // verify signatures + from, err := txArgs.Sender() + require.NoError(t, err, comment) + + smsg, err := ToSignedFilecoinMessage(txArgs) + require.NoError(t, err, comment) + + err = sigs.Verify(&smsg.Signature, from, msgRecovered) + require.NoError(t, err, comment) + + // verify data + require.Equal(t, tc.Output.ChainID, txArgs.ChainID, comment) + require.Equal(t, tc.Output.Nonce, txArgs.Nonce, comment) + require.Equal(t, tc.Output.To, txArgs.To, comment) + } +} + +func TestEIP1559Signatures(t *testing.T) { + testcases := []struct { + RawTx string + ExpectedR string + ExpectedS string + ExpectedV string + ExpectErr bool + }{ + { + "0x02f8598401df5e76028301d69083086a5e835532dd808080c080a0457e33227ac7ceee2ef121755e26b872b6fb04221993f9939349bb7b0a3e1595a02d8ef379e1d2a9e30fa61c92623cc9ed72d80cf6a48cfea341cb916bcc0a81bc", + "0x457e33227ac7ceee2ef121755e26b872b6fb04221993f9939349bb7b0a3e1595", + "0x2d8ef379e1d2a9e30fa61c92623cc9ed72d80cf6a48cfea341cb916bcc0a81bc", + "0x0", + false, + }, + { + "0x02f8598401df5e76038301d69083086a5e835532dd808080c001a012a232866dcb0671eb0ddc01fb9c01d6ef384ec892bb29691ed0d2d293052ddfa052a6ae38c6139930db21a00eee2a4caced9a6500991b823d64ec664d003bc4b1", + "0x12a232866dcb0671eb0ddc01fb9c01d6ef384ec892bb29691ed0d2d293052ddf", + "0x52a6ae38c6139930db21a00eee2a4caced9a6500991b823d64ec664d003bc4b1", + "0x1", + false, + }, + { + "0x00", + "", + "", + "", + true, + }, + } + + for _, tc := range testcases { + tx, err := parseEip1559Tx(mustDecodeHex(tc.RawTx)) + if tc.ExpectErr { + require.Error(t, err) + continue + } + require.Nil(t, err) + + sig, err := tx.Signature() + require.Nil(t, err) + + require.NoError(t, tx.InitialiseSignature(*sig)) + + require.Equal(t, 
tc.ExpectedR, "0x"+tx.R.Text(16)) + require.Equal(t, tc.ExpectedS, "0x"+tx.S.Text(16)) + require.Equal(t, tc.ExpectedV, "0x"+tx.V.Text(16)) + } +} + +func TestTransformParams(t *testing.T) { + constructorParams, err := actors.SerializeParams(&evm.ConstructorParams{ + Initcode: mustDecodeHex("0x1122334455"), + }) + require.Nil(t, err) + + evmActorCid, ok := actors.GetActorCodeID(actorstypes.Version10, "reward") + require.True(t, ok) + + params, err := actors.SerializeParams(&init10.ExecParams{ + CodeCID: evmActorCid, + ConstructorParams: constructorParams, + }) + require.Nil(t, err) + + var exec init10.ExecParams + reader := bytes.NewReader(params) + err1 := exec.UnmarshalCBOR(reader) + require.Nil(t, err1) + + var evmParams evm.ConstructorParams + reader1 := bytes.NewReader(exec.ConstructorParams) + err1 = evmParams.UnmarshalCBOR(reader1) + require.Nil(t, err1) + + require.Equal(t, mustDecodeHex("0x1122334455"), evmParams.Initcode) +} + +func TestEcRecover(t *testing.T) { + rHex := "0x479ff7fa64cf8bf641eb81635d1e8a698530d2f219951d234539e6d074819529" + sHex := "0x4b6146d27be50cdbb2853ba9a42f207af8d730272f1ebe9c9a78aeef1d6aa924" + fromHex := "0x3947D223fc5415f43ea099866AB62B1d4D33814D" + v := byte(0) + + msgHex := "0x02f1030185012a05f2008504a817c800825208942b87d1cb599bc2a606db9a0169fcec96af04ad3a880de0b6b3a764000080c0" + pubKeyHex := "0x048362749392a0e192eff600d21155236c5a0648d300a8e0e44d8617712c7c96384c75825dc5c7595df2a5005fd8a0f7c809119fb9ab36403ed712244fc329348e" + + msg := mustDecodeHex(msgHex) + pubKey := mustDecodeHex(pubKeyHex) + r := mustDecodeHex(rHex) + s := mustDecodeHex(sHex) + from := mustDecodeHex(fromHex) + + sig := append(r, s...) 
+ sig = append(sig, v) + require.Equal(t, 65, len(sig)) + + sha := sha3.NewLegacyKeccak256() + sha.Write(msg) + h := sha.Sum(nil) + + pubk, err := gocrypto.EcRecover(h, sig) + require.Nil(t, err) + require.Equal(t, pubKey, pubk) + + sha.Reset() + sha.Write(pubk[1:]) + h = sha.Sum(nil) + h = h[len(h)-20:] + + require.Equal(t, from, h) +} + +func TestDelegatedSigner(t *testing.T) { + rHex := "0xcf1fa52fae9154ba21d67aeca9b42adfe186eb9e426c441051a8473efd190848" + sHex := "0x0e6c8c79ffaf35fb8f136c8cf6c5656f1f3befad21f2644321aa6dba58d68737" + v := byte(0) + + msgHex := "0x02f08401df5e76038502540be400843b9aca008398968094ff000000000000000000000000000000000003f2832dc6c080c0" + pubKeyHex := "0x04cfecc0520d906cbfea387759246e89d85e2998843e56ad1c41de247ce10b3e4c453aa73c8de13c178d94461b6fa3f8b6f74406ce43d2fbab6992d0b283394242" + + msg := mustDecodeHex(msgHex) + pubk := mustDecodeHex(pubKeyHex) + r := mustDecodeHex(rHex) + s := mustDecodeHex(sHex) + + addrHash, err := EthAddressFromPubKey(pubk) + require.NoError(t, err) + + from, err := address.NewDelegatedAddress(builtintypes.EthereumAddressManagerActorID, addrHash) + require.NoError(t, err) + + sig := append(r, s...) 
+ sig = append(sig, v) + require.Equal(t, 65, len(sig)) + + signature := &crypto1.Signature{ + Type: crypto1.SigTypeDelegated, + Data: sig, + } + + err = sigs.Verify(signature, from, msg) + require.NoError(t, err) +} + +func prepareTxTestcases() ([]TxTestcase, error) { + tcstr := `[{"input":"0x02f86282013a8080808094ff000000000000000000000000000000000003ec8080c080a0f411a73e33523b40c1a916e79e67746bd01a4a4fb4ecfa87b441375a215ddfb4a0551692c1553574fab4c227ca70cb1c121dc3a2ef82179a9c984bd7acc0880a38","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec8080c001a0ed75a56e365c88479bf3f60251a2dd47ae181f1a3d95724581a3f648487b4396a046628bb9734edf4b4c455f2bbd351e43c466f315272cd1927f2c55d9b52e058b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86282013a8080808094ff000000000000000000000000000000000003ec8080c080a0f411a73e33523b40c1a916e79e67746bd01a4a4fb4ecfa87b441375a215ddfb4a0551692c1553574fab4c227ca70cb1c121dc3a2ef82179a9c984bd7acc0880a38","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec8080c001a0ed75a56e365c88479bf3f60251a2dd47ae181f1a3d95724581a3f648487b4396a046628bb9734edf4b4c455f2bbd351e43c466f315272cd1927f
2c55d9b52e058b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88682013a8080808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0706d871013403cf8b965dfa7f2be5a4d185d746da45b21d5a67c667c26d255d6a02e68a14f386aa325ce8e82d30405107d53103d038cf20e40af961ef3a3963608","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84382013a8080808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88782013a81c880808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0df137d0a6733354b2f2419a4ea5fe77d333deca28b2fe091d76190b51c2bae73a0232cbf9c29b8840cbf104ff77360fbf3ca4acda29b5e230636e19ac253ad92de","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84482013a81c880808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec8080c001a03a2880cc65e88d5320067f502a0ffda72111d01f0ebeeea9fbeb812e457aa0f9a020c08483b104dbfbbbffffedc3acdbe8245ca6daf97c0dbab843d747e587d625","output":"{\"to\":\"0
xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec8080c001a03427daf1639de6bf1b948abeab765b0a6a9170cc6a16d263c71c859f78916b03a01bbbb824b9953b5eb9f3098b4358a7ebb78f3358866eed997de66350ae4c9475","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec8080c001a03a2880cc65e88d5320067f502a0ffda72111d01f0ebeeea9fbeb812e457aa0f9a020c08483b104dbfbbbffffedc3acdbe8245ca6daf97c0dbab843d747e587d625","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec8080c001a03427daf1639de6bf1b948abeab765b0a6a9170cc6a16d263c71c859f78916b03a01bbbb824b9953b5eb9f3098b4358a7ebb78f3358866eed997de66350ae4c9475","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88882013a808082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0b9ebc3665
3a4800816f71ceacf93a1ee601a136916a3476ea9073a9a55ff026aa0647665249b12e8d1d1773b91844588ed70f65c91bc088ccb259ec0f0a24330d5","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a808082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c88082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0122dd8468dbd34e111e1a5ea1997199be633aa3bc9c1a7ee27dc3a8eda39c29da07cb99cd28ac67f55e507a8b8ef5b931c56cacf79273a4a2969a004a4b4a2864a","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c88082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec8080c080a0c1d020df63cb6db76e3a27a60ba0500a3cdd30f9f47b08733009dc8d610ea29ba05cbafb4c223417526ded0b02b8eb66a73535386d0e62da0e20f3641b532aa406","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec8080c080a090e30d32c6cd3f1ba2109b6a9f1c9fffc50b96a934192edf98adc086299e410ba057db0c136436de2e907942
bdaad8e0113cf576f250b336ab652ef094c260dae6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec8080c080a0c1d020df63cb6db76e3a27a60ba0500a3cdd30f9f47b08733009dc8d610ea29ba05cbafb4c223417526ded0b02b8eb66a73535386d0e62da0e20f3641b532aa406","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec8080c080a090e30d32c6cd3f1ba2109b6a9f1c9fffc50b96a934192edf98adc086299e410ba057db0c136436de2e907942bdaad8e0113cf576f250b336ab652ef094c260dae6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88882013a8082ea60808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a016e3f30a612fc802bb64b765325ecf78f2769b879a9acf62f07669f9723335d6a0781bb3444a73819f28233f1eebf8c3a4de288842fd73c2e05a7a7b0c288d5b25","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a8082ea60808094ff00000
0000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c882ea60808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0b652a447bdcdd1906ed86406ee543ee06023e4f762784c1d3aaf4c3bd85c6a17a0368ae9995e15258f14b74f937e97140a659d052d341674be0c24452257b56b30","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c882ea60808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea6082ea608094ff000000000000000000000000000000000003ec8080c001a0b1411f337b69609a256c0e76c57ccf4af87e977c98fd2a889f29281bf623cab4a049bec0fb4773aed870bae9c1cdf1ee398c498f0b436dcd19cae588b4ecd8bdf2","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c080a00b845fec9c96bf593c3501753764e14867d3f5d4bd02051e49329b6810d6513ea070d046e5b38c18c542594b328f02345a8f34ab05fd00db33974f914f7ae31c63","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86682013a8082ea6082ea608094ff000000000000000000000000000000000003ec808
0c001a0b1411f337b69609a256c0e76c57ccf4af87e977c98fd2a889f29281bf623cab4a049bec0fb4773aed870bae9c1cdf1ee398c498f0b436dcd19cae588b4ecd8bdf2","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c080a00b845fec9c96bf593c3501753764e14867d3f5d4bd02051e49329b6810d6513ea070d046e5b38c18c542594b328f02345a8f34ab05fd00db33974f914f7ae31c63","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88a82013a8082ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a02d8215d8408d2f4b83a2e68f4aad6fe5dee97d7ef6a43b02ec413ead2215ac80a0641a43cebd6905e3e324c0dd06585d5ffc9b971b519045999c48e31db7aa7f9d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88a82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0da68784e191ce0806527d389f84b5d15bed3908e1c2cc0d8f0cea7a29eb0dba39f231a0b438b7d0f0f57292c68dc174d4ee6df7add933ab4
e0b3789f597a7d3b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec8080c080a04c97162e2d2ab508116a23c522fd816ecd9cb091d4c288afe45c37ee3a8dde34a06ebf67ff15b74d65c276340aaebde8e6ebb8da0d3bbab43deffac8eb1e6a0630","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a80808082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c080a0d503d409e667c2876ab9e420854cecce4c0092985855234be07f270bfcf3ed4aa07a40deecc8a4448d4dc0e2014b4b23ac5721409c62bffa05aee6938d8447f72d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec8080c080a04c97162e2d2ab508116a23c522fd816ecd9cb091d4c288afe45c37ee3a8dde34a06ebf67ff15b74d65c276340aaebde8e6ebb8da0d3bbab43deffac8eb1e6a0630","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a80808082ea6094ff00000000000000000000000
0000000000003ec8080c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c080a0d503d409e667c2876ab9e420854cecce4c0092985855234be07f270bfcf3ed4aa07a40deecc8a4448d4dc0e2014b4b23ac5721409c62bffa05aee6938d8447f72d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88882013a80808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a059aecc1d365ee0dc56a577d162f04c0912a5c5b62f889cff1acc706ac17a4489a017209b3ec43a10a40c5863a2b7a1ee823380ad42697a5f7d5f537c230583a4c7","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a80808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c8808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0dc1eb40f93e311f3f9a94d8a695db2bbb38973ce097121875885e4bc54f18152a0075da0bd405bb4f5c69034daaf8f40052b941fae5b9f3b8df218d80fb4d7ea99","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c8808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff0000000000000000000000
0000000000000064c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a03d392fd5e83c64554907a55204572aaeec6ffab25f2c73655c6a22344fa02a14a03b9ae94b7dc21108db6dda65125ecaff844f8f43f483bed35f32f6d5d530fe9f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c001a0405e8a430ef6ad4c3403150776af08c255b6f6fbe278d194f88517733c816caca0364203b5bca7953dd863d4cf90c0a77b499ef4a3d5831c4fdf33926c31709c4f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a03d392fd5e83c64554907a55204572aaeec6ffab25f2c73655c6a22344fa02a14a03b9ae94b7dc21108db6dda65125ecaff844f8f43f483bed35f32f6d5d530fe9f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c001a0405e8a430ef6ad4c3403150776af08c255b6f6fbe278d194f88517733c816caca0364203b5bca7953dd863d4cf90c0a77b499ef4a3d5831c4fdf33926c31709c4f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\"
:\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88a82013a808082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a083cf6701aee00872946b6550c059f028f72e3052acb8cc9c25b830ace860e046a03fd969d73e995d43896659f94d3956a17da18451050349e7db6f7881f8c057d3","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a808082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c5a545f2d94e719068d9a43b01879bcb46b56e236dd378dd26ef3b8e4ec8314aa04024b9936960b9b156405e4f3e0b6562518df8778324a927381e380b23f47fb8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c080a0aa406ec7f4901a1777e44b975ff41603b9d46257efdc1ca904a3e7890f2b020ea03bda5c785182cfa2d9f9b7a54f194cd08b9d0f913069a4514ff21e8fa0ef3850","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorit
yFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c080a089fc465c24b4bad898cf900f585eddab6d40189e8d19746da76597f86fbadf51a005732ffa2ebac36646afab9105540b543f74a5c91b441834a2b1930815c2ccc8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c080a0aa406ec7f4901a1777e44b975ff41603b9d46257efdc1ca904a3e7890f2b020ea03bda5c785182cfa2d9f9b7a54f194cd08b9d0f913069a4514ff21e8fa0ef3850","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c080a089fc465c24b4bad898cf900f585eddab6d40189e8d19746da76597f86fbadf51a005732ffa2ebac36646afab9105540b543f74a5c91b441834a2b1930815c2ccc8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88a82013a8082ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a09d9a8ee802486b826348a76346987b3e7331d70ef0c0257ff976ceebef1141a2a07d97d14ed
877c16bd932f08a67c374e773ee3337d512ff8241c8d78566a04d46","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a024ad1ec1578f51beb2b574507bda7691a486cdbc9c22add01ad4c1f686beb567a048445e0fe8945b8052e5e87139690c0615a11c52503b226cf23610c999eada40","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a06b382fcbe48de85615ff6e2dcc0c84021beb4abc527878accd36c9c77af84ba8a06a07d34a6896b270538525cb14b0856ceb442714fa85e4c9ee36dedf638935f9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a0ba2586cfb3323fd0f9d7bb38bf9948758a52f156bda66f7100b789760894ad89a01e4bd2ff4eff2c391915141250313ab84540
1d5e2f71c23691d20a0b3c68cbd9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a06b382fcbe48de85615ff6e2dcc0c84021beb4abc527878accd36c9c77af84ba8a06a07d34a6896b270538525cb14b0856ceb442714fa85e4c9ee36dedf638935f9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a0ba2586cfb3323fd0f9d7bb38bf9948758a52f156bda66f7100b789760894ad89a01e4bd2ff4eff2c391915141250313ab845401d5e2f71c23691d20a0b3c68cbd9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88c82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0f36ff02ab3e90d2de77cdb24423dc39ca5c959429db62cb5c9ed4f0c9e04703aa0476bf841b0602af44039801d4e68648971f63fc2152002b127be6d914d4fc5ca","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":
2,\"chainId\":314}","nosigTx":"0x02f84982013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88d82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a08267ae8838a8a5d9c2a761c182b5759184b7672b761278d499c1514fb6e8a495a023aa268f67da7728767e114fdec4d141bf649e0ad931117b5b325834dbf72803","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84a82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86282013a8080808094ff000000000000000000000000000000000003ec6480c080a011ec4af7fc663080460b70ae8829f47e9cfa1814c616750d359459cbbba55563a0446e4ec9ea504d13dcbef44238e442caad366dbae1ae9408d39c6d902a5577b0","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec6480c001a0b80bc30bef46b3f824d1460685db875ff070f7798c3148c1fc49c01d6acc550ca0437efe7721563800e6a56ac54877a72c7860cd5e17ef4675afe989822ae87759","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86282013a8080808
094ff000000000000000000000000000000000003ec6480c080a011ec4af7fc663080460b70ae8829f47e9cfa1814c616750d359459cbbba55563a0446e4ec9ea504d13dcbef44238e442caad366dbae1ae9408d39c6d902a5577b0","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec6480c001a0b80bc30bef46b3f824d1460685db875ff070f7798c3148c1fc49c01d6acc550ca0437efe7721563800e6a56ac54877a72c7860cd5e17ef4675afe989822ae87759","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88682013a8080808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a06ab9d5988105d28dd090e509c8caabaa7773fc08ec5ef3dfeae532e01938ff69a078bca296df26dd2497a49110e138a49a67a6e232a35524b041d04a10fc583651","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84382013a8080808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88782013a81c880808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a031d51b866a02a9966250d312ed6cb4e083f9131ad8f6bb5814074375093d7536a03f8f819c4011dd54348930b6f98f365de8060b487ada38a62a5617aab6cc6e09",
"output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84482013a81c880808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec6480c001a05bda5ad44c8f9a7516226488cf2d4f53188b40352f35ea7cece8076acda26dbba015373b3b78c88b74c7cca32fd02696a248bb9bea22a09c7a4a17b9e3b629b896","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec6480c080a00d92624cc3335c903077e318204929b4a8c9cd96d94690b0191f8a3bb24e937aa02f1d0315ececf46900154791a732eb8fee9efd0dc998a4e6b892d07ad657a815","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec6480c001a05bda5ad44c8f9a7516226488cf2d4f53188b40352f35ea7cece8076acda26dbba015373b3b78c88b74c7cca32fd02696a248bb9bea22a09c7a4a17b9e3b629b896","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094ff000000000000000000000000000000000003ec6480c0"},{"
input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec6480c080a00d92624cc3335c903077e318204929b4a8c9cd96d94690b0191f8a3bb24e937aa02f1d0315ececf46900154791a732eb8fee9efd0dc998a4e6b892d07ad657a815","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88882013a808082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0def168136c0532ec148a9e200e3cc1b22f90c7bbc5d9ef25ac0c5d342e8f3784a022f94642dfc81ba321b3e09879888332fa7c25b623bead7686e3e493c0911b55","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a808082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c88082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0626f43b80260f84cde2c67538c5cfbd328ce85b0f934e8568769e51709b100a7a0283fff5dbfde72b72e2b74c464b1add985d72750be3f4e16ae8ffb4747a40ff2","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c88082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"}
,{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec6480c080a051b109080002dab4aae47139eb92ddea8951ef5ac6dfc3d7fa07621047dbc680a0334aa47a2888a6cc52b8cf3c3635192b66c692416e954822c1c93c3896ff1ead","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec6480c080a009e179e3bad2da6fb5e205e52fd8d1c462007162aabde5a4d6b052dd4fc4f23ca063922c31438835adf2e4424e2e7d5d2702ec65de2e24a72b491ff0004a53865d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec6480c080a051b109080002dab4aae47139eb92ddea8951ef5ac6dfc3d7fa07621047dbc680a0334aa47a2888a6cc52b8cf3c3635192b66c692416e954822c1c93c3896ff1ead","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec6480c080a009e179e3bad2da6fb5e205e52fd8d1c462007162aabde5a4d6b052dd4fc4f23ca063922c31438835adf2e4424e2e7d5d2702ec65de2e24a72b491ff0004a53865d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigT
x":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88882013a8082ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0d3bfebc6597304c6a06491f68d2ac149fc233d28e81af48dd5b1f83e6ff951d2a06668da06d86aba341971dabb58016ca7764cd4b4c1634e3f829dcc8ef8bca4f6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a8082ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c882ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0d45b9fd9a2a3fdf79805cf73b70348037cc69927209a5e3728fe62cbe9543f03a02f5f8477666487ee5148a65ce59f400beac7c208369162b2d555411314d358fb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c882ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c001a02a6a910f7b5f83fda937006021b9c074f4544d5bb37b9b5a1b7045095f461836a038572b25418528bce7e6a3a480cf9fc90a33d9c63b392c2dbc8faf72a1e4ab8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}",
"nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c080a07a6dd661b5da27c809cce22aa186c158fe3b07a484a9395fd9a7a31a2b90636fa02b86f82b661264e27c3fda085b59740d3059335bff91693291afcf93c7ca627c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86682013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c001a02a6a910f7b5f83fda937006021b9c074f4544d5bb37b9b5a1b7045095f461836a038572b25418528bce7e6a3a480cf9fc90a33d9c63b392c2dbc8faf72a1e4ab8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c080a07a6dd661b5da27c809cce22aa186c158fe3b07a484a9395fd9a7a31a2b90636fa02b86f82b661264e27c3fda085b59740d3059335bff91693291afcf93c7ca627c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88a82013a8082ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a08c13c10490bc20cb1e55dc54ececb37a6c9cc8d013dbe513feacbb0416f09feba045c4e038759a0901820091e043db326b1bf9a8a1cd046ac72629969497c6a86f","output":"{\"to
\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0b904edf8eb9b6beb9cde9e1fae538e12f8d40e9124ace0cba2eee8cbbe77aa10a0788a0bd9a6fb98e7230f5db89be2f5067d1a227ba277b9cb155fb5859c57aae6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec6480c080a08d10a7a81c561391fe88bcb2c1dfbf4f7140fb7884fec0558606e76ffc4eaa91a049fa2a95e0f07a4376df9c6f2e1563ad443ce8369d44c6e1ce8ee521805b3623","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a80808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c001a00de6dc2841a25e5ea2dc1e054d69638ec519a9953666930060797cd110cde122a07fd1dcb6319eca7c681cef006efb3f7dcd74ff98a79ce05917d5d1fa7a175b6f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"v
alue\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec6480c080a08d10a7a81c561391fe88bcb2c1dfbf4f7140fb7884fec0558606e76ffc4eaa91a049fa2a95e0f07a4376df9c6f2e1563ad443ce8369d44c6e1ce8ee521805b3623","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a80808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c001a00de6dc2841a25e5ea2dc1e054d69638ec519a9953666930060797cd110cde122a07fd1dcb6319eca7c681cef006efb3f7dcd74ff98a79ce05917d5d1fa7a175b6f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88882013a80808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a04c43dab94dd746973a1f7f051cc520cc01e93e9c6c55147cef34e5fdc0b182a2a06d148cc6ec017f9aeb6442a17d72e388ffc835950e19abd0c06057520f893542","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a80808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff000000000000000000000000000000000
00064c0"},{"input":"0x02f88982013a81c8808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a025b50c1db31c0ae7aaa73374659201b54b71488efecbb6985dc50015abde7e36a04dd8cf68920de7232ab8d1fb28ab94ac05466c1f9d9a3a658f2054fce7868e2c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c8808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c080a0415ad0a93225eaec617206ec835e362d5e75fd0e1903747c1806270ec2684c7da0487ec1479cdb2affa891ff56413818ec169651c906ab932594b6e5bbb79d4998","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0a46ac278c400ef099ad23ac4ccb066a37db8bb5c4d65e0a347152a499ae9eb92a07505f9c67f0897cbe6f848c9a2164c3c234dab2fea7a4dd6f4436be34080e2ff","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c080a0415ad0a93225eaec617206ec835e362d5e75fd0e1903747c1806270ec2684c7da0487ec1479cdb2affa891ff564
13818ec169651c906ab932594b6e5bbb79d4998","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0a46ac278c400ef099ad23ac4ccb066a37db8bb5c4d65e0a347152a499ae9eb92a07505f9c67f0897cbe6f848c9a2164c3c234dab2fea7a4dd6f4436be34080e2ff","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88a82013a808082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0a43aba5078d2da3ecc1ec0c67191f8cf58f29f5b4db7f8d4765ea691ddbd4195a0110e568c803db5ea587b406f452cf49ddf6b6f24d41207973d6c785ffaed1454","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a808082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a00caeadf2fcba95f0deab5ee4899348ecac4a18eeb09317d6f8156b891626d219a0549c5376aba320889c2f7b61fd4a51aec5f9a1d9ed9b26cef0a3bee52fac4989","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value
\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec6480c001a07b5568d8a3ec3c7e126f570955db304e31d3f3d7b0c4fd103b6d064a2f6f5e23a030a1b17f299352ae193b8dbce2adda473ccb04e00670f416877762971697606f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c080a07bb69d01062f9d6ecb011ad344bbe08d4eca2f6b192dde45015def4c2e6096e0a03a3df52d753e3293d2fd544f72e62ceae00ea6dcab7229685d7b1873d873d203","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec6480c001a07b5568d8a3ec3c7e126f570955db304e31d3f3d7b0c4fd103b6d064a2f6f5e23a030a1b17f299352ae193b8dbce2adda473ccb04e00670f416877762971697606f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f8678201
3a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c080a07bb69d01062f9d6ecb011ad344bbe08d4eca2f6b192dde45015def4c2e6096e0a03a3df52d753e3293d2fd544f72e62ceae00ea6dcab7229685d7b1873d873d203","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88a82013a8082ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0621255015626b35acf19629ce318999336441537920f9f3ff1bfd44e54d8abd3a03b3426f8fa963debdfa6b44561772bdebc9524c7f63abd0d947b678f5e966502","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0b73c3ba53fc5a0f7fab636cc2b826c3873cda5d0be9dd2100fdceae7899f3310a0491905f676063924cf847fdf2e488be4606ce351748e5c88d49ed50c8d595c94","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000
000000000064c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0e60702e3f5c5f56e3d1bc2907015ec889d0557ea14e81f137056471fef0fdb9da066e601e6e55c2e37e2042401b352e81841d492d0fe4f05bfe81bba29c9e6ce1f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a085a947fb201d0b50272e7bb7a056adc9ee6f5904634ed91dbde0d650641b7de3a03635c731769302e955d41f794a63262d5d4d37d117c9db89a6b6bce927b71f42","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0e60702e3f5c5f56e3d1bc2907015ec889d0557ea14e81f137056471fef0fdb9da066e601e6e55c2e37e2042401b352e81841d492d0fe4f05bfe81bba29c9e6ce1f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a085a947fb201d0b50272e7bb7a056adc9ee6f5904634ed91dbde0d650641b7de3a03635c731769302e955d41f794a63262d5d4d37d117c9db89a6b6bce927b71f42","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"600
00\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88c82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0d67e28d31489af5129c4832af814a01e0baa5e5ba6245fe2d3304693ceea48e0a03bc06f1c6dd01a14826c67aa35258c0bbf7c516a9bb21e9190eaa8d3768f49bb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84982013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88d82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0a5368984aca4bc1e3d7ebc7ae4ead5e09ffd3b4b4712d039c19fdac948e5952ea065953ace0a29210440d6a0f05d6b43f482950b463b3be6b23fc63452c94b9446","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84a82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86a82013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a086da25ab078729b08cf48da02eb1c1e05fe0f4e5d7b332262b68f4db3dc9b72fa04102c03c7d9f11a6fdb77d6a36d3f07e09b1ceaab0bf4ef1fdc604bcd726f83b","output":"{\"to\":\"0xff00000000000000000000000
0000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e782013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86b82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0cde92f395919b3205b4260867b11597f9ecf363bc1be9bbd8b5400d3381d64b3a01b9555cfa22ee8615c3033235ebad605d0bef616d08876de26719866fcc4d41e","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e882013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86a82013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a086da25ab078729b08cf48da02eb1c1e05fe0f4e5d7b332262b68f4db3dc9b72fa04102c03c7d9f11a6fdb77d6a36d3f07e09b1ceaab0bf4ef1fdc604bcd726f83b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e782013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86b82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0cde92f395919b3205b4260867b11597f9ecf363bc1be9bbd8b5400d3381d64b3a01b9555cfa22ee8615c3033235ebad605d0bef616d08876de26719866fcc4d41e","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e882013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f88e82013a8080808094ff0000000000000000000
00000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a03dd64e48a1ae228665b3f180367997ee96bc60ee226615c900e3d86634044328a00f6cdb24633e75fa65f6b93fce9b084c1f30dd03dde97d01f25c6f10f34d5d9d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84b82013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88f82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a07475efeb8dd5bf4ba7efb31ab67a9077401ed71f4e8dd13e7058ce5cfeb5a0f2a01046e93a5258bf320bc392173a49b6fef15976be4c1210f2e367af223ad8c026","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84c82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86c82013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0ca84441c7ba097a7afa5ef9ad7ef70ba58ddfffc06c5d015b5c8553f1632d103a057fee6d92055c9c031a1efa667f3ee554804c4f34a195b6dfc781e1592c20444","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a808082ea608094ff0
00000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a04055dfcd6e0b7264d3474ba13f76659384e5f365ebc6ba271641481b12bf410ca01ef7d04dc33fdf0c3137e31d8c822ad68bbd4f89ada52db9705bb66813d11583","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86c82013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0ca84441c7ba097a7afa5ef9ad7ef70ba58ddfffc06c5d015b5c8553f1632d103a057fee6d92055c9c031a1efa667f3ee554804c4f34a195b6dfc781e1592c20444","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a04055dfcd6e0b7264d3474ba13f76659384e5f365ebc6ba271641481b12bf410ca01ef7d04dc33fdf0c3137e31d8c822ad68bbd4f89ada52db9705bb66813d11583","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89082013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a02080212bb64a798e1e138e4991ab830cf04d37ffeedf6fde7eba0eb
7d972b350a02aff43f9e5ca8d6cea6e918391188fa37bdb91b864eadec705f7c69c4a61bc5a","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84d82013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89182013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0e41c052d72950a563b8ed7fb15855beabea43ff5b038bd6a3ccc6416e3498619a0568bbd7cbff31a47e1d0b9712f382c52e74b7b28cbcb8458974d82a8d54ddc57","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84e82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86c82013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a057c342304f133ff8832d3d16a43571afe905dc9b10afc24c6e99225cca6d8817a00e2155d1904751ce0d2ba01e6475aeae254c02966773f5bc7650e37252a01a92","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3
a764000080c001a0fc2a550a7798085cae28028abbe4829be29e5f3a40af221086831d0e17ca3c83a01ce21f5934b9ca566958e09e89c99fd9ed2dc4acae209a6fb81fd3a6c9879a99","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86c82013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a057c342304f133ff8832d3d16a43571afe905dc9b10afc24c6e99225cca6d8817a00e2155d1904751ce0d2ba01e6475aeae254c02966773f5bc7650e37252a01a92","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0fc2a550a7798085cae28028abbe4829be29e5f3a40af221086831d0e17ca3c83a01ce21f5934b9ca566958e09e89c99fd9ed2dc4acae209a6fb81fd3a6c9879a99","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89082013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0fa33b63666310ca1c72fc5d82639c5b8e2a7638910be7bee23ada9f139c6b891a02012cad8e991beea7dcf0b6e9346b0228699698e183e2fadfc5b9b880601af9b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":
\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84d82013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89182013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0bc6ae4e92e7a20d5ff61258653dffda636cee0fd97dd156eac7a1f231f1f2785a0323055e0e0bed496b3fec30be292338d0956ecf8baeeb34458230821589aa7fb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84e82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86e82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0bd2889395392859a83a33bfe549c09d172e1f289de29d4bc9d0a3d25ea8aa71ba075fe92140a08d8e680061852438623c9cd10e211955577d1a3b56e49e960e4e7","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a05553c929ae32692a9f742371ffcfc8c8d2b77f31a7795460297cb78c29e357e8a043e42ca4ed7eb1b8e3546de2364522735d79a2e2ff5d16f7f96d16
5c5815c80c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86e82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0bd2889395392859a83a33bfe549c09d172e1f289de29d4bc9d0a3d25ea8aa71ba075fe92140a08d8e680061852438623c9cd10e211955577d1a3b56e49e960e4e7","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a05553c929ae32692a9f742371ffcfc8c8d2b77f31a7795460297cb78c29e357e8a043e42ca4ed7eb1b8e3546de2364522735d79a2e2ff5d16f7f96d165c5815c80c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89282013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a055f63a6bef8e23dc437ff4ac9349a59fcde2f72d1879de50b0d3686ff648749da04cf8034df06cf6f15f31bb55979b40eeacbd28fb1d745e608acdc088e22beb66","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\"
,\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84f82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89382013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c4a0253448dad999692c1bf3cfb5de9e95a2e96da4e1f64133ada452a825fe9aa0757b576ceb7a2c494819960ac59e9d3a4e3da384f23c0e88ada758dc265eae94","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85082013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86c82013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a02632c4d8a443afb8d39f91d036fd4915ca3ad2f253b8f93211b4b3ee15566519a009bdc00c8eaaf22f3d7d04b53dbc777fd027a780fb4ddaf01002724ddf2879dd","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a08bda02c15ca37d35d9ad2e2f7731d24dd039f5c6c6f7eaad739daadac6db33e5a044c01e493e10929e4021c69d9df886b211eb349a865df9f0796846ad1cdf23e8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000
000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86c82013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a02632c4d8a443afb8d39f91d036fd4915ca3ad2f253b8f93211b4b3ee15566519a009bdc00c8eaaf22f3d7d04b53dbc777fd027a780fb4ddaf01002724ddf2879dd","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a08bda02c15ca37d35d9ad2e2f7731d24dd039f5c6c6f7eaad739daadac6db33e5a044c01e493e10929e4021c69d9df886b211eb349a865df9f0796846ad1cdf23e8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89082013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0ed0db75f41b2b8b89768ce5ad08716aff149dc1d5a2e593140d8964eb2da3229a02e5248cca9b5af340d73271cad4d690f7efa11c9278824aca528eb15d28aec4d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosig
Tx":"0x02f84d82013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89182013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a07108fbbabc45826dbdc8e4cf831240fb39ead7bd4b8ec5d8de64d04e2885e554a04dae4fb4bdbabb9d8f923d579e75ee980da1b4fac5773ec68f395af240f037f0","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84e82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86e82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0130b6723050095faa2e7abc69c2f785e73d333c65fae6cf2835518f970c627d5a00b90bd4f2ded1da0163ab5e81ad76d51aef005d663137347fc550313e1c8b6fc","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0993a50431e82d10d632466d45f8aaffea9a56efa59d529dfd497d3c2a06aabeba0070d3132c6ce1e4ff70b0721d1f4c03ab566b8e2af29d33148033fb3009dc29d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"cha
inId\":314}","nosigTx":"0x02ec82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86e82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0130b6723050095faa2e7abc69c2f785e73d333c65fae6cf2835518f970c627d5a00b90bd4f2ded1da0163ab5e81ad76d51aef005d663137347fc550313e1c8b6fc","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0993a50431e82d10d632466d45f8aaffea9a56efa59d529dfd497d3c2a06aabeba0070d3132c6ce1e4ff70b0721d1f4c03ab566b8e2af29d33148033fb3009dc29d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89282013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a09c9d3b0d7b58bfe81a6881b9db184e0ade03c1ad11aa8f1566e2f24f50f85525a06c10cf91f4dbc24d0f78ef09a8e2310d349a034cec7e86e807d7a48ea26161e1","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84f82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a
4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89382013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0f8423b51e513618c6a4bdd2696479d91c760e11ea24657dd27fa6eb9b7da8c0ea07e9456113fb034718d1b4f4e09ade1ce78251a8c86f298b152850bc5925156cb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85082013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86e82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0d09b373d45c1bfc1c5d9b5198e69f974d4df456245e2f7a5edd486f3dd2795a9a011396197a670e7b0c4613b7ebf8aee53382930c7bd25c35dda15acae78ec0e2c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0131f5af3ece9a0b723d0c812dbcfc6cb458acf5e0846cc506215fc04d6af66d5a078d0bf7a40cc1ddcebbc4e86fb9a04bfc94f3da94b4a74476883b7b1729f8a44","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c882ea608082ea6094ff0000000000000000000000
00000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86e82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0d09b373d45c1bfc1c5d9b5198e69f974d4df456245e2f7a5edd486f3dd2795a9a011396197a670e7b0c4613b7ebf8aee53382930c7bd25c35dda15acae78ec0e2c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0131f5af3ece9a0b723d0c812dbcfc6cb458acf5e0846cc506215fc04d6af66d5a078d0bf7a40cc1ddcebbc4e86fb9a04bfc94f3da94b4a74476883b7b1729f8a44","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89282013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c286f4ee350eab70273cf9a952537534446a0f39e9bfea7340eabc04396a0e3da01e1302ae987a69836ec2c9266e6fe623db5fcdc566e37084c0c57630c4de8ee6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84f82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"
input":"0x02f89382013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a09dee3fa88e365133a18035618af718a045e1a957f10f50c632f23923fd337b9ba06bbbd59489849803f8c61138932ac1a8361edb4c80789d030542829c0a2b5b7f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85082013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f87082013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0c1cb1e2b41e48fecd59d72039147c76993653f061f9ea156b53c377673eef7f1a01822506f755206b60209a12ed3c84446f4fcb4ad602fa7ab7ee4ff2acde19ed6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02ed82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f87182013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a09817043ad22797d2f26ca46697db5f586c38336a171dce2d22d659889e9e9eb5a0369a5d6169586d9c831b6e017aa29fd49eac0636a136bfa5bafb95390fa95b8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ee82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f
87082013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0c1cb1e2b41e48fecd59d72039147c76993653f061f9ea156b53c377673eef7f1a01822506f755206b60209a12ed3c84446f4fcb4ad602fa7ab7ee4ff2acde19ed6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02ed82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f87182013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a09817043ad22797d2f26ca46697db5f586c38336a171dce2d22d659889e9e9eb5a0369a5d6169586d9c831b6e017aa29fd49eac0636a136bfa5bafb95390fa95b8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ee82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89482013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a039357ad40087d17551ca2b94723f0394185a993671db02172a7de70c24054852a046c84070dfadd244b358690e5b89c75f3988b21b6614e6e3af2f8ca302d6c42a","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85182013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f8958201
3a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c991c81705a4c53a9255e72beb8243638c68f10c63b082755972bbbe15245d12a014f6852ae34c92882559e6810d4372109930a23b522368fdef2c85ce04e27839","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85282013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"}]` + + testcases := []struct { + Input EthBytes `json:"input"` + Output string `json:"output"` + NosigTx string `json:"nosigTx"` + }{} + + err := json.Unmarshal([]byte(tcstr), &testcases) + if err != nil { + return nil, err + } + + res := []TxTestcase{} + for _, tc := range testcases { + tx := Eth1559TxArgs{} + err := json.Unmarshal([]byte(tc.Output), &tx) + if err != nil { + return nil, err + } + res = append(res, TxTestcase{ + Input: tc.Input, + Output: tx, + TxJSON: tc.Output, + NosigTx: tc.NosigTx, + }) + } + + return res, err +} diff --git a/chain/types/ethtypes/eth_legacy_155_transactions.go b/chain/types/ethtypes/eth_legacy_155_transactions.go new file mode 100644 index 00000000000..bad8a0bbfa1 --- /dev/null +++ b/chain/types/ethtypes/eth_legacy_155_transactions.go @@ -0,0 +1,303 @@ +package ethtypes + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + typescrypto "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" +) + +var _ EthTransaction = (*EthLegacy155TxArgs)(nil) + +// EthLegacy155TxArgs is a legacy Ethereum 
transaction that uses the EIP-155 chain replay protection mechanism +// by incorporating the chainId in the signature. +// See how the `V` value in the signature is derived from the chainId at +// https://github.com/ethereum/go-ethereum/blob/86a1f0c39494c8f5caddf6bd9fbddd4bdfa944fd/core/types/transaction_signing.go#L424 +// For EthLegacy155TxArgs, the digest that is used to create a signed transaction includes the `ChainID` but the serialised RLP transaction +// does not include the `ChainID` as an explicit field. Instead, the `ChainID` is included in the V value of the signature as mentioned above. +type EthLegacy155TxArgs struct { + legacyTx *EthLegacyHomesteadTxArgs +} + +func NewEthLegacy155TxArgs(tx *EthLegacyHomesteadTxArgs) *EthLegacy155TxArgs { + return &EthLegacy155TxArgs{legacyTx: tx} +} + +func (tx *EthLegacy155TxArgs) GetLegacyTx() *EthLegacyHomesteadTxArgs { + return tx.legacyTx +} + +func (tx *EthLegacy155TxArgs) ToEthTx(smsg *types.SignedMessage) (EthTx, error) { + from, err := EthAddressFromFilecoinAddress(smsg.Message.From) + if err != nil { + return EthTx{}, fmt.Errorf("sender was not an eth account") + } + hash, err := tx.TxHash() + if err != nil { + return EthTx{}, fmt.Errorf("failed to get tx hash: %w", err) + } + + gasPrice := EthBigInt(tx.legacyTx.GasPrice) + ethTx := EthTx{ + ChainID: build.Eip155ChainId, + Type: EthLegacyTxType, + Nonce: EthUint64(tx.legacyTx.Nonce), + Hash: hash, + To: tx.legacyTx.To, + Value: EthBigInt(tx.legacyTx.Value), + Input: tx.legacyTx.Input, + Gas: EthUint64(tx.legacyTx.GasLimit), + GasPrice: &gasPrice, + From: from, + R: EthBigInt(tx.legacyTx.R), + S: EthBigInt(tx.legacyTx.S), + V: EthBigInt(tx.legacyTx.V), + } + + return ethTx, nil +} + +func (tx *EthLegacy155TxArgs) ToUnsignedFilecoinMessage(from address.Address) (*types.Message, error) { + if err := validateEIP155ChainId(tx.legacyTx.V); err != nil { + return nil, fmt.Errorf("failed to validate EIP155 chain id: %w", err) + } + return 
tx.legacyTx.ToUnsignedFilecoinMessage(from) +} + +func (tx *EthLegacy155TxArgs) ToRlpUnsignedMsg() ([]byte, error) { + return toRlpUnsignedMsg(tx) +} + +func (tx *EthLegacy155TxArgs) TxHash() (EthHash, error) { + encoded, err := tx.ToRawTxBytesSigned() + if err != nil { + return EthHash{}, fmt.Errorf("failed to encode rlp signed msg: %w", err) + } + + return EthHashFromTxBytes(encoded), nil +} + +func (tx *EthLegacy155TxArgs) ToRawTxBytesSigned() ([]byte, error) { + packed1, err := tx.packTxFields() + if err != nil { + return nil, err + } + + packed1 = packed1[:len(packed1)-3] // remove chainId, r and s as they are only used for signature verification + + packed2, err := packSigFields(tx.legacyTx.V, tx.legacyTx.R, tx.legacyTx.S) + if err != nil { + return nil, err + } + + encoded, err := EncodeRLP(append(packed1, packed2...)) + if err != nil { + return nil, fmt.Errorf("failed to encode rlp signed msg: %w", err) + } + return encoded, nil +} + +func (tx *EthLegacy155TxArgs) ToRlpSignedMsg() ([]byte, error) { + return toRlpSignedMsg(tx, tx.legacyTx.V, tx.legacyTx.R, tx.legacyTx.S) +} + +func (tx *EthLegacy155TxArgs) Signature() (*typescrypto.Signature, error) { + if err := validateEIP155ChainId(tx.legacyTx.V); err != nil { + return nil, fmt.Errorf("failed to validate EIP155 chain id: %w", err) + } + r := tx.legacyTx.R.Int.Bytes() + s := tx.legacyTx.S.Int.Bytes() + v := tx.legacyTx.V.Int.Bytes() + + sig := append([]byte{}, padLeadingZeros(r, 32)...) + sig = append(sig, padLeadingZeros(s, 32)...) + sig = append(sig, v...) + + // pre-pend a one byte marker so nodes know that this is a legacy transaction + sig = append([]byte{EthLegacy155TxSignaturePrefix}, sig...) 
+ + if len(sig) != EthLegacy155TxSignatureLen0 && len(sig) != EthLegacy155TxSignatureLen1 { + return nil, fmt.Errorf("signature is not %d OR %d bytes; it is %d bytes", EthLegacy155TxSignatureLen0, EthLegacy155TxSignatureLen1, + len(sig)) + } + + return &typescrypto.Signature{ + Type: typescrypto.SigTypeDelegated, Data: sig, + }, nil +} + +func (tx *EthLegacy155TxArgs) Sender() (address.Address, error) { + if err := validateEIP155ChainId(tx.legacyTx.V); err != nil { + return address.Address{}, fmt.Errorf("failed to validate EIP155 chain id: %w", err) + } + return sender(tx) +} + +func (tx *EthLegacy155TxArgs) Type() int { + return EthLegacyTxType +} + +var big8 = big.NewInt(8) + +func (tx *EthLegacy155TxArgs) ToVerifiableSignature(sig []byte) ([]byte, error) { + if len(sig) != EthLegacy155TxSignatureLen0 && len(sig) != EthLegacy155TxSignatureLen1 { + return nil, fmt.Errorf("signature should be %d or %d bytes long but got %d bytes", + EthLegacy155TxSignatureLen0, EthLegacy155TxSignatureLen1, len(sig)) + } + if sig[0] != EthLegacy155TxSignaturePrefix { + return nil, fmt.Errorf("expected signature prefix 0x%x, but got 0x%x", EthLegacy155TxSignaturePrefix, sig[0]) + } + + // Remove the prefix byte as it's only used for legacy transaction identification + sig = sig[1:] + + // Extract the 'v' value from the signature + vValue := big.NewFromGo(big.NewInt(0).SetBytes(sig[64:])) + + if err := validateEIP155ChainId(vValue); err != nil { + return nil, fmt.Errorf("failed to validate EIP155 chain id: %w", err) + } + + // See https://github.com/ethereum/go-ethereum/blob/86a1f0c39494c8f5caddf6bd9fbddd4bdfa944fd/core/types/transaction_signing.go#L424 + chainIdMul := big.Mul(big.NewIntUnsigned(build.Eip155ChainId), big.NewInt(2)) + vValue = big.Sub(vValue, chainIdMul) + vValue = big.Sub(vValue, big8) + + // Adjust 'v' value for compatibility with new transactions: 27 -> 0, 28 -> 1 + if vValue.Equals(big.NewInt(27)) { + sig[64] = 0 + } else if vValue.Equals(big.NewInt(28)) { + 
sig[64] = 1 + } else { + return nil, fmt.Errorf("invalid 'v' value: expected 27 or 28, got %d", vValue.Int64()) + } + + return sig[0:65], nil +} + +func (tx *EthLegacy155TxArgs) InitialiseSignature(sig typescrypto.Signature) error { + if sig.Type != typescrypto.SigTypeDelegated { + return fmt.Errorf("RecoverSignature only supports Delegated signature") + } + + if len(sig.Data) != EthLegacy155TxSignatureLen0 && len(sig.Data) != EthLegacy155TxSignatureLen1 { + return fmt.Errorf("signature should be %d or %d bytes long, but got %d bytes", EthLegacy155TxSignatureLen0, + EthLegacy155TxSignatureLen1, len(sig.Data)) + } + + if sig.Data[0] != EthLegacy155TxSignaturePrefix { + return fmt.Errorf("expected signature prefix 0x01, but got 0x%x", sig.Data[0]) + } + + // ignore the first byte of the signature as it's only used for legacy transaction identification + r_, err := parseBigInt(sig.Data[1:33]) + if err != nil { + return fmt.Errorf("cannot parse r into EthBigInt: %w", err) + } + + s_, err := parseBigInt(sig.Data[33:65]) + if err != nil { + return fmt.Errorf("cannot parse s into EthBigInt: %w", err) + } + + v_, err := parseBigInt(sig.Data[65:]) + if err != nil { + return fmt.Errorf("cannot parse v into EthBigInt: %w", err) + } + + if err := validateEIP155ChainId(v_); err != nil { + return fmt.Errorf("failed to validate EIP155 chain id: %w", err) + } + + tx.legacyTx.R = r_ + tx.legacyTx.S = s_ + tx.legacyTx.V = v_ + return nil +} + +func (tx *EthLegacy155TxArgs) packTxFields() ([]interface{}, error) { + nonce, err := formatInt(tx.legacyTx.Nonce) + if err != nil { + return nil, err + } + + // format gas price + gasPrice, err := formatBigInt(tx.legacyTx.GasPrice) + if err != nil { + return nil, err + } + + gasLimit, err := formatInt(tx.legacyTx.GasLimit) + if err != nil { + return nil, err + } + + value, err := formatBigInt(tx.legacyTx.Value) + if err != nil { + return nil, err + } + + chainIdBigInt := big.NewIntUnsigned(build.Eip155ChainId) + chainId, err := 
formatBigInt(chainIdBigInt) + if err != nil { + return nil, err + } + + r, err := formatInt(0) + if err != nil { + return nil, err + } + + s, err := formatInt(0) + if err != nil { + return nil, err + } + + res := []interface{}{ + nonce, + gasPrice, + gasLimit, + formatEthAddr(tx.legacyTx.To), + value, + tx.legacyTx.Input, + chainId, + r, s, + } + return res, nil +} + +func validateEIP155ChainId(v big.Int) error { + chainId := deriveEIP155ChainId(v) + if !chainId.Equals(big.NewIntUnsigned(build.Eip155ChainId)) { + return fmt.Errorf("invalid chain id, expected %d, got %s", build.Eip155ChainId, chainId.String()) + } + return nil +} + +// deriveEIP155ChainId derives the chain id from the given v parameter +func deriveEIP155ChainId(v big.Int) big.Int { + if big.BitLen(v) <= 64 { + vUint64 := v.Uint64() + if vUint64 == 27 || vUint64 == 28 { + return big.NewInt(0) + } + return big.NewIntUnsigned((vUint64 - 35) / 2) + } + + v = big.Sub(v, big.NewInt(35)) + return big.Div(v, big.NewInt(2)) +} + +func calcEIP155TxSignatureLen(chain uint64, v int) int { + chainId := big.NewIntUnsigned(chain) + vVal := big.Add(big.Mul(chainId, big.NewInt(2)), big.NewInt(int64(v))) + vLen := len(vVal.Int.Bytes()) + + // EthLegacyHomesteadTxSignatureLen includes the 1 byte legacy tx marker prefix and also 1 byte for the V value. 
+ // So we subtract 1 to not double count the length of the v value + return EthLegacyHomesteadTxSignatureLen + vLen - 1 +} diff --git a/chain/types/ethtypes/eth_legacy_155_transactions_test.go b/chain/types/ethtypes/eth_legacy_155_transactions_test.go new file mode 100644 index 00000000000..fe4b786535b --- /dev/null +++ b/chain/types/ethtypes/eth_legacy_155_transactions_test.go @@ -0,0 +1,188 @@ +package ethtypes + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/big" + builtintypes "github.com/filecoin-project/go-state-types/builtin" +) + +func TestEIP155Tx(t *testing.T) { + txStr := "f86680843b9aca00835dc1ba94c2dca9a18d4a4057921d1bcb22da05e68e46b1d06480820297a0f91ee69c4603c4f21131467ee9e06ad4a96d0a29fa8064db61f3adaea0eb6e92a07181e306bb8f773d94cc3b75e9835de00c004e072e6630c0c46971d38706bb01" + + bz := mustDecodeHex(txStr) + + tx, err := parseLegacyTx(bz) + require.NoError(t, err) + + eth155Tx, ok := tx.(*EthLegacy155TxArgs) + require.True(t, ok) + + // Verify nonce + require.EqualValues(t, 0, eth155Tx.legacyTx.Nonce) + + // Verify recipient address + expectedToAddr, err := ParseEthAddress("0xc2dca9a18d4a4057921d1bcb22da05e68e46b1d0") + require.NoError(t, err) + require.EqualValues(t, expectedToAddr, *eth155Tx.legacyTx.To) + + // Verify sender address + expectedFromAddr, err := ParseEthAddress("0xA2BBB73aC59b256415e91A820b224dbAF2C268FA") + require.NoError(t, err) + sender, err := eth155Tx.Sender() + require.NoError(t, err) + expectedFromFilecoinAddr, err := expectedFromAddr.ToFilecoinAddress() + require.NoError(t, err) + require.EqualValues(t, expectedFromFilecoinAddr, sender) + + // Verify transaction value + expectedValue, ok := big.NewInt(0).SetString("100", 10) + require.True(t, ok) + require.True(t, eth155Tx.legacyTx.Value.Cmp(expectedValue) == 0) + + // Verify gas limit and gas price + expectedGasPrice, ok := big.NewInt(0).SetString("1000000000", 10) + require.True(t, ok) + require.EqualValues(t, 
6144442, eth155Tx.legacyTx.GasLimit) + require.True(t, eth155Tx.legacyTx.GasPrice.Cmp(expectedGasPrice) == 0) + + require.Empty(t, eth155Tx.legacyTx.Input) + + // Verify signature values (v, r, s) + expectedV, ok := big.NewInt(0).SetString("0297", 16) + require.True(t, ok) + require.True(t, eth155Tx.legacyTx.V.Cmp(expectedV) == 0) + + expectedR, ok := big.NewInt(0).SetString("f91ee69c4603c4f21131467ee9e06ad4a96d0a29fa8064db61f3adaea0eb6e92", 16) + require.True(t, ok) + require.True(t, eth155Tx.legacyTx.R.Cmp(expectedR) == 0) + + expectedS, ok := big.NewInt(0).SetString("7181e306bb8f773d94cc3b75e9835de00c004e072e6630c0c46971d38706bb01", 16) + require.True(t, ok) + require.True(t, eth155Tx.legacyTx.S.Cmp(expectedS) == 0) + + // Convert to signed Filecoin message and verify fields + smsg, err := ToSignedFilecoinMessage(eth155Tx) + require.NoError(t, err) + + require.EqualValues(t, smsg.Message.From, sender) + + expectedToFilecoinAddr, err := eth155Tx.legacyTx.To.ToFilecoinAddress() + require.NoError(t, err) + require.EqualValues(t, smsg.Message.To, expectedToFilecoinAddr) + require.EqualValues(t, smsg.Message.Value, eth155Tx.legacyTx.Value) + require.EqualValues(t, smsg.Message.GasLimit, eth155Tx.legacyTx.GasLimit) + require.EqualValues(t, smsg.Message.GasFeeCap, eth155Tx.legacyTx.GasPrice) + require.EqualValues(t, smsg.Message.GasPremium, eth155Tx.legacyTx.GasPrice) + require.EqualValues(t, smsg.Message.Nonce, eth155Tx.legacyTx.Nonce) + require.Empty(t, smsg.Message.Params) + require.EqualValues(t, smsg.Message.Method, builtintypes.MethodsEVM.InvokeContract) + + // Convert signed Filecoin message back to Ethereum transaction and verify equality + ethTx, err := EthTransactionFromSignedFilecoinMessage(smsg) + require.NoError(t, err) + convertedLegacyTx, ok := ethTx.(*EthLegacy155TxArgs) + require.True(t, ok) + eth155Tx.legacyTx.Input = nil + require.EqualValues(t, convertedLegacyTx, eth155Tx) + + // Verify EthTx fields + ethTxVal, err := eth155Tx.ToEthTx(smsg) + 
require.NoError(t, err) + expectedHash, err := eth155Tx.TxHash() + require.NoError(t, err) + require.EqualValues(t, ethTxVal.Hash, expectedHash) + require.Nil(t, ethTxVal.MaxFeePerGas) + require.Nil(t, ethTxVal.MaxPriorityFeePerGas) + require.EqualValues(t, ethTxVal.Gas, eth155Tx.legacyTx.GasLimit) + require.EqualValues(t, ethTxVal.Value, eth155Tx.legacyTx.Value) + require.EqualValues(t, ethTxVal.Nonce, eth155Tx.legacyTx.Nonce) + require.EqualValues(t, ethTxVal.To, eth155Tx.legacyTx.To) + require.EqualValues(t, ethTxVal.From, expectedFromAddr) +} + +func TestDeriveEIP155ChainId(t *testing.T) { + tests := []struct { + name string + v big.Int + expectedChainId big.Int + }{ + { + name: "V equals 27", + v: big.NewInt(27), + expectedChainId: big.NewInt(0), + }, + { + name: "V equals 28", + v: big.NewInt(28), + expectedChainId: big.NewInt(0), + }, + { + name: "V small chain ID", + v: big.NewInt(37), // (37 - 35) / 2 = 1 + expectedChainId: big.NewInt(1), + }, + { + name: "V large chain ID", + v: big.NewInt(1001), // (1001 - 35) / 2 = 483 + expectedChainId: big.NewInt(483), + }, + { + name: "V very large chain ID", + v: big.NewInt(1 << 20), // (1048576 - 35) / 2 = 524270 + expectedChainId: big.NewInt(524270), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := deriveEIP155ChainId(tt.v) + require.True(t, result.Equals(tt.expectedChainId), "Expected %s, got %s for V=%s", tt.expectedChainId.String(), result.String(), tt.v.String()) + }) + } +} + +func TestCalcEIP155TxSignatureLen(t *testing.T) { + tests := []struct { + name string + chainID uint64 + expected int + }{ + { + name: "ChainID that fits in 1 byte", + chainID: 0x01, + expected: EthLegacyHomesteadTxSignatureLen + 1 - 1, + }, + { + name: "ChainID that fits in 2 bytes", + chainID: 0x0100, + expected: EthLegacyHomesteadTxSignatureLen + 2 - 1, + }, + { + name: "ChainID that fits in 3 bytes", + chainID: 0x010000, + expected: EthLegacyHomesteadTxSignatureLen + 3 - 1, + }, + { + name: 
"ChainID that fits in 4 bytes", + chainID: 0x01000000, + expected: EthLegacyHomesteadTxSignatureLen + 4 - 1, + }, + { + name: "ChainID that fits in 6 bytes", + chainID: 0x010000000000, + expected: EthLegacyHomesteadTxSignatureLen + 6 - 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := calcEIP155TxSignatureLen(tt.chainID, 1) + if result != tt.expected { + t.Errorf("calcEIP155TxSignatureLen(%d) = %d, want %d", tt.chainID, result, tt.expected) + } + }) + } +} diff --git a/chain/types/ethtypes/eth_legacy_homestead_transactions.go b/chain/types/ethtypes/eth_legacy_homestead_transactions.go new file mode 100644 index 00000000000..fbfe6d6a346 --- /dev/null +++ b/chain/types/ethtypes/eth_legacy_homestead_transactions.go @@ -0,0 +1,228 @@ +package ethtypes + +import ( + "fmt" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + typescrypto "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/types" +) + +var _ EthTransaction = (*EthLegacyHomesteadTxArgs)(nil) + +type EthLegacyHomesteadTxArgs struct { + Nonce int `json:"nonce"` + GasPrice big.Int `json:"gasPrice"` + GasLimit int `json:"gasLimit"` + To *EthAddress `json:"to"` + Value big.Int `json:"value"` + Input []byte `json:"input"` + V big.Int `json:"v"` + R big.Int `json:"r"` + S big.Int `json:"s"` +} + +func (tx *EthLegacyHomesteadTxArgs) ToEthTx(smsg *types.SignedMessage) (EthTx, error) { + from, err := EthAddressFromFilecoinAddress(smsg.Message.From) + if err != nil { + return EthTx{}, fmt.Errorf("sender was not an eth account") + } + hash, err := tx.TxHash() + if err != nil { + return EthTx{}, fmt.Errorf("failed to get tx hash: %w", err) + } + + gasPrice := EthBigInt(tx.GasPrice) + ethTx := EthTx{ + ChainID: EthLegacyHomesteadTxChainID, + Type: EthLegacyTxType, + Nonce: EthUint64(tx.Nonce), + Hash: hash, + To: tx.To, + Value: EthBigInt(tx.Value), + Input: 
tx.Input, + Gas: EthUint64(tx.GasLimit), + GasPrice: &gasPrice, + From: from, + R: EthBigInt(tx.R), + S: EthBigInt(tx.S), + V: EthBigInt(tx.V), + } + + return ethTx, nil +} + +func (tx *EthLegacyHomesteadTxArgs) ToUnsignedFilecoinMessage(from address.Address) (*types.Message, error) { + mi, err := getFilecoinMethodInfo(tx.To, tx.Input) + if err != nil { + return nil, xerrors.Errorf("failed to get method info: %w", err) + } + + return &types.Message{ + Version: 0, + To: mi.to, + From: from, + Nonce: uint64(tx.Nonce), + Value: tx.Value, + GasLimit: int64(tx.GasLimit), + GasFeeCap: tx.GasPrice, + GasPremium: tx.GasPrice, + Method: mi.method, + Params: mi.params, + }, nil +} + +func (tx *EthLegacyHomesteadTxArgs) ToVerifiableSignature(sig []byte) ([]byte, error) { + if len(sig) != EthLegacyHomesteadTxSignatureLen { + return nil, fmt.Errorf("signature should be %d bytes long (1 byte metadata, %d bytes sig data), but got %d bytes", + EthLegacyHomesteadTxSignatureLen, EthLegacyHomesteadTxSignatureLen-1, len(sig)) + } + if sig[0] != EthLegacyHomesteadTxSignaturePrefix { + return nil, fmt.Errorf("expected signature prefix 0x%x, but got 0x%x", EthLegacyHomesteadTxSignaturePrefix, sig[0]) + } + + // Remove the prefix byte as it's only used for legacy transaction identification + sig = sig[1:] + + // Extract the 'v' value from the signature, which is the last byte in Ethereum signatures + vValue := big.NewFromGo(big.NewInt(0).SetBytes(sig[64:])) + + // Adjust 'v' value for compatibility with new transactions: 27 -> 0, 28 -> 1 + if vValue.Equals(big.NewInt(27)) { + sig[64] = 0 + } else if vValue.Equals(big.NewInt(28)) { + sig[64] = 1 + } else { + return nil, fmt.Errorf("invalid 'v' value: expected 27 or 28, got %d", vValue.Int64()) + } + + return sig, nil +} + +func (tx *EthLegacyHomesteadTxArgs) ToRlpUnsignedMsg() ([]byte, error) { + return toRlpUnsignedMsg(tx) +} + +func (tx *EthLegacyHomesteadTxArgs) TxHash() (EthHash, error) { + rlp, err := tx.ToRlpSignedMsg() + if err != 
nil { + return EthHash{}, err + } + return EthHashFromTxBytes(rlp), nil +} + +func (tx *EthLegacyHomesteadTxArgs) ToRlpSignedMsg() ([]byte, error) { + return toRlpSignedMsg(tx, tx.V, tx.R, tx.S) +} + +func (tx *EthLegacyHomesteadTxArgs) Signature() (*typescrypto.Signature, error) { + // throw an error if the v value is not 27 or 28 + if !tx.V.Equals(big.NewInt(27)) && !tx.V.Equals(big.NewInt(28)) { + return nil, fmt.Errorf("legacy homestead transactions only support 27 or 28 for v") + } + r := tx.R.Int.Bytes() + s := tx.S.Int.Bytes() + v := tx.V.Int.Bytes() + + sig := append([]byte{}, padLeadingZeros(r, 32)...) + sig = append(sig, padLeadingZeros(s, 32)...) + if len(v) == 0 { + sig = append(sig, 0) + } else { + sig = append(sig, v[0]) + } + // pre-pend a one byte marker so nodes know that this is a legacy transaction + sig = append([]byte{EthLegacyHomesteadTxSignaturePrefix}, sig...) + + if len(sig) != EthLegacyHomesteadTxSignatureLen { + return nil, fmt.Errorf("signature is not %d bytes", EthLegacyHomesteadTxSignatureLen) + } + + return &typescrypto.Signature{ + Type: typescrypto.SigTypeDelegated, Data: sig, + }, nil +} + +func (tx *EthLegacyHomesteadTxArgs) Sender() (address.Address, error) { + return sender(tx) +} + +func (tx *EthLegacyHomesteadTxArgs) Type() int { + return EthLegacyTxType +} + +func (tx *EthLegacyHomesteadTxArgs) InitialiseSignature(sig typescrypto.Signature) error { + if sig.Type != typescrypto.SigTypeDelegated { + return fmt.Errorf("RecoverSignature only supports Delegated signature") + } + + if len(sig.Data) != EthLegacyHomesteadTxSignatureLen { + return fmt.Errorf("signature should be %d bytes long, but got %d bytes", EthLegacyHomesteadTxSignatureLen, len(sig.Data)) + } + + if sig.Data[0] != EthLegacyHomesteadTxSignaturePrefix { + return fmt.Errorf("expected signature prefix 0x01, but got 0x%x", sig.Data[0]) + } + + // ignore the first byte of the signature as it's only used for legacy transaction identification + r_, err := 
parseBigInt(sig.Data[1:33]) + if err != nil { + return fmt.Errorf("cannot parse r into EthBigInt: %w", err) + } + + s_, err := parseBigInt(sig.Data[33:65]) + if err != nil { + return fmt.Errorf("cannot parse s into EthBigInt: %w", err) + } + + v_, err := parseBigInt([]byte{sig.Data[65]}) + if err != nil { + return fmt.Errorf("cannot parse v into EthBigInt: %w", err) + } + + if !v_.Equals(big.NewInt(27)) && !v_.Equals(big.NewInt(28)) { + return fmt.Errorf("legacy homestead transactions only support 27 or 28 for v") + } + + tx.R = r_ + tx.S = s_ + tx.V = v_ + return nil +} + +func (tx *EthLegacyHomesteadTxArgs) packTxFields() ([]interface{}, error) { + nonce, err := formatInt(tx.Nonce) + if err != nil { + return nil, err + } + + // format gas price + gasPrice, err := formatBigInt(tx.GasPrice) + if err != nil { + return nil, err + } + + gasLimit, err := formatInt(tx.GasLimit) + if err != nil { + return nil, err + } + + value, err := formatBigInt(tx.Value) + if err != nil { + return nil, err + } + + res := []interface{}{ + nonce, + gasPrice, + gasLimit, + formatEthAddr(tx.To), + value, + tx.Input, + } + return res, nil +} diff --git a/chain/types/ethtypes/eth_legacy_homestead_transactions_test.go b/chain/types/ethtypes/eth_legacy_homestead_transactions_test.go new file mode 100644 index 00000000000..cb4b4ac4e56 --- /dev/null +++ b/chain/types/ethtypes/eth_legacy_homestead_transactions_test.go @@ -0,0 +1,299 @@ +package ethtypes + +import ( + "encoding/hex" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/big" + builtintypes "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/lotus/lib/sigs" +) + +func TestEthLegacyHomesteadTxArgs(t *testing.T) { + testcases := []struct { + RawTx string + ExpectedNonce uint64 + ExpectedTo string + ExpectedInput string + ExpectedGasPrice big.Int + ExpectedGasLimit int + ExpectErr bool + }{ + { + 
"0xf882800182540894095e7baea6a6c7c4c2dfeb977efac326af552d8780a3deadbeef0000000101010010101010101010101010101aaabbbbbbcccccccddddddddd1ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a01fffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804", + 0x0, + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "0xdeadbeef0000000101010010101010101010101010101aaabbbbbbcccccccddddddddd", + big.NewInt(1), + 0x5408, + false, + }, + { + "0xf85f030182520794b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa07778cde41a8a37f6a087622b38bc201bd3e7df06dce067569d4def1b53dba98c", + 0x3, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "0x", + big.NewInt(1), + 0x5207, + false, + }, + } + + for i, tc := range testcases { + // parse txargs + tx, err := parseLegacyTx(mustDecodeHex(tc.RawTx)) + require.NoError(t, err) + + msgRecovered, err := tx.ToRlpUnsignedMsg() + require.NoError(t, err) + + // verify signatures + from, err := tx.Sender() + require.NoError(t, err) + + smsg, err := ToSignedFilecoinMessage(tx) + require.NoError(t, err) + + sig := smsg.Signature.Data[:] + sig = sig[1:] + vValue := big.NewInt(0).SetBytes(sig[64:]) + vValue_ := big.Sub(big.NewFromGo(vValue), big.NewInt(27)) + sig[64] = byte(vValue_.Uint64()) + smsg.Signature.Data = sig + + err = sigs.Verify(&smsg.Signature, from, msgRecovered) + require.NoError(t, err) + + txArgs := tx.(*EthLegacyHomesteadTxArgs) + // verify data + require.EqualValues(t, tc.ExpectedNonce, txArgs.Nonce, i) + + expectedTo, err := ParseEthAddress(tc.ExpectedTo) + require.NoError(t, err) + require.EqualValues(t, expectedTo, *txArgs.To, i) + require.EqualValues(t, tc.ExpectedInput, "0x"+hex.EncodeToString(txArgs.Input)) + require.EqualValues(t, tc.ExpectedGasPrice, txArgs.GasPrice) + require.EqualValues(t, tc.ExpectedGasLimit, txArgs.GasLimit) + } +} + +func TestLegacyHomesteadSignatures(t *testing.T) { + testcases := []struct { + RawTx string + ExpectedR string + 
ExpectedS string + ExpectedV string + ExpectErr bool + ExpectErrMsg string + ExpectVMismatch bool + }{ + { + "0xf882800182540894095e7baea6a6c7c4c2dfeb977efac326af552d8780a3deadbeef0000000101010010101010101010101010101aaabbbbbbcccccccddddddddd1ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a01fffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804", + "0x48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353", + "0x1fffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804", + "0x1b", + false, + "", + false, + }, + { + "0xf85f030182520794b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa07778cde41a8a37f6a087622b38bc201bd3e7df06dce067569d4def1b53dba98c", + "0x98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a", + "0x7778cde41a8a37f6a087622b38bc201bd3e7df06dce067569d4def1b53dba98c", + "0x1b", + false, + "", + false, + }, + { + "0xf882800182540894095e7baea6a6c7c4c2dfeb977efac326af552d8780a3deadbeef0000000101010010101010101010101010101aaabbbbbbcccccccddddddddd1ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a01fffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804", + "0x48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353", + "0x1fffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804", + "0x1c", + false, + "", + true, + }, + { + "0xf882800182540894095e7baea6a6c7c4c2dfeb977efac326af552d8780a3deadbeef0000000101010010101010101010101010101aaabbbbbbcccccccddddddddd1ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a01fffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804", + "0x48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353", + "0x1fffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804", + "0x1f", + false, + "", + true, + }, + { + "0x00", + "", + "", + "", + true, + "not a legacy eth transaction", + false, + }, + } + + for i, tc := range testcases { + tx, 
err := parseLegacyTx(mustDecodeHex(tc.RawTx)) + if tc.ExpectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.ExpectErrMsg) + continue + } + require.Nil(t, err) + + sig, err := tx.Signature() + require.Nil(t, err) + + require.NoError(t, tx.InitialiseSignature(*sig)) + + txArgs := tx.(*EthLegacyHomesteadTxArgs) + + require.Equal(t, tc.ExpectedR, "0x"+txArgs.R.Text(16), i) + require.Equal(t, tc.ExpectedS, "0x"+txArgs.S.Text(16), i) + + if tc.ExpectVMismatch { + require.NotEqual(t, tc.ExpectedV, "0x"+txArgs.V.Text(16), i) + } else { + require.Equal(t, tc.ExpectedV, "0x"+txArgs.V.Text(16), i) + } + } +} + +// https://etherscan.io/getRawTx?tx=0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef +// https://tools.deth.net/tx-decoder +func TestEtherScanLegacyRLP(t *testing.T) { + rlp := "0xf8718301efc58506fc23ac008305161594104994f45d9d697ca104e5704a7b77d7fec3537c890821878651a4d70000801ba051222d91a379452395d0abaff981af4cfcc242f25cfaf947dea8245a477731f9a03a997c910b4701cca5d933fb26064ee5af7fe3236ff0ef2b58aa50b25aff8ca5" + bz := mustDecodeHex(rlp) + + tx, err := parseLegacyTx(bz) + require.NoError(t, err) + + ethLegacyTx, ok := tx.(*EthLegacyHomesteadTxArgs) + require.True(t, ok) + + // Verify nonce + require.EqualValues(t, 0x1efc5, ethLegacyTx.Nonce) + + // Verify recipient address + expectedToAddr, err := ParseEthAddress("0x104994f45d9d697ca104e5704a7b77d7fec3537c") + require.NoError(t, err) + require.EqualValues(t, expectedToAddr, *ethLegacyTx.To) + + // Verify sender address + expectedFromAddr, err := ParseEthAddress("0x32Be343B94f860124dC4fEe278FDCBD38C102D88") + require.NoError(t, err) + sender, err := ethLegacyTx.Sender() + require.NoError(t, err) + expectedFromFilecoinAddr, err := expectedFromAddr.ToFilecoinAddress() + require.NoError(t, err) + require.EqualValues(t, expectedFromFilecoinAddr, sender) + + // Verify transaction value + expectedValue, ok := big.NewInt(0).SetString("821878651a4d70000", 16) + require.True(t, ok) + 
require.True(t, ethLegacyTx.Value.Cmp(expectedValue) == 0) + + // Verify gas limit and gas price + expectedGasPrice, ok := big.NewInt(0).SetString("6fc23ac00", 16) + require.True(t, ok) + require.EqualValues(t, 0x51615, ethLegacyTx.GasLimit) + require.True(t, ethLegacyTx.GasPrice.Cmp(expectedGasPrice) == 0) + + require.Empty(t, ethLegacyTx.Input) + + // Verify signature values (v, r, s) + expectedV, ok := big.NewInt(0).SetString("1b", 16) + require.True(t, ok) + require.True(t, ethLegacyTx.V.Cmp(expectedV) == 0) + + expectedR, ok := big.NewInt(0).SetString("51222d91a379452395d0abaff981af4cfcc242f25cfaf947dea8245a477731f9", 16) + require.True(t, ok) + require.True(t, ethLegacyTx.R.Cmp(expectedR) == 0) + + expectedS, ok := big.NewInt(0).SetString("3a997c910b4701cca5d933fb26064ee5af7fe3236ff0ef2b58aa50b25aff8ca5", 16) + require.True(t, ok) + require.True(t, ethLegacyTx.S.Cmp(expectedS) == 0) + + // Convert to signed Filecoin message and verify fields + smsg, err := ToSignedFilecoinMessage(ethLegacyTx) + require.NoError(t, err) + + require.EqualValues(t, smsg.Message.From, sender) + + expectedToFilecoinAddr, err := ethLegacyTx.To.ToFilecoinAddress() + require.NoError(t, err) + require.EqualValues(t, smsg.Message.To, expectedToFilecoinAddr) + require.EqualValues(t, smsg.Message.Value, ethLegacyTx.Value) + require.EqualValues(t, smsg.Message.GasLimit, ethLegacyTx.GasLimit) + require.EqualValues(t, smsg.Message.GasFeeCap, ethLegacyTx.GasPrice) + require.EqualValues(t, smsg.Message.GasPremium, ethLegacyTx.GasPrice) + require.EqualValues(t, smsg.Message.Nonce, ethLegacyTx.Nonce) + require.Empty(t, smsg.Message.Params) + require.EqualValues(t, smsg.Message.Method, builtintypes.MethodsEVM.InvokeContract) + + // Convert signed Filecoin message back to Ethereum transaction and verify equality + ethTx, err := EthTransactionFromSignedFilecoinMessage(smsg) + require.NoError(t, err) + convertedLegacyTx, ok := ethTx.(*EthLegacyHomesteadTxArgs) + require.True(t, ok) + 
ethLegacyTx.Input = nil + require.EqualValues(t, convertedLegacyTx, ethLegacyTx) + + // Verify EthTx fields + ethTxVal, err := ethLegacyTx.ToEthTx(smsg) + require.NoError(t, err) + expectedHash, err := ethLegacyTx.TxHash() + require.NoError(t, err) + require.EqualValues(t, ethTxVal.Hash, expectedHash) + require.Nil(t, ethTxVal.MaxFeePerGas) + require.Nil(t, ethTxVal.MaxPriorityFeePerGas) + require.EqualValues(t, ethTxVal.Gas, ethLegacyTx.GasLimit) + require.EqualValues(t, ethTxVal.Value, ethLegacyTx.Value) + require.EqualValues(t, ethTxVal.Nonce, ethLegacyTx.Nonce) + require.EqualValues(t, ethTxVal.To, ethLegacyTx.To) + require.EqualValues(t, ethTxVal.From, expectedFromAddr) +} + +func TestFailurePaths(t *testing.T) { + // Test case for invalid RLP + invalidRLP := "0x08718301efc58506fc23ac008305161594104994f45d9d697ca104e5704a7b77d7fec3537c890821878651a4d70000801ba051222d91a379452395d0abaff981af4cfcc242f25cfaf947dea8245a477731f9a03a997c910b4701cca5d933fb26064ee5af7fe3236ff0ef2b58aa50b25aff8ca5" + decoded, err := hex.DecodeString(strings.TrimPrefix(invalidRLP, "0x")) + require.NoError(t, err) + + _, err = parseLegacyTx(decoded) + require.Error(t, err, "Expected error for invalid RLP") + + // Test case for mangled signature + mangledSignatureRLP := "0xf8718301efc58506fc23ac008305161594104994f45d9d697ca104e5704a7b77d7fec3537c890821878651a4d70000801ba051222d91a379452395d0abaff981af4cfcc242f25cfaf947dea8245a477731f9a03a997c910b4701cca5d933fb26064ee5af7fe3236ff0ef2b58aa50b25aff8ca5" + decodedSig, err := hex.DecodeString(strings.TrimPrefix(mangledSignatureRLP, "0x")) + require.NoError(t, err) + + tx, err := parseLegacyTx(decodedSig) + require.NoError(t, err) + + ethLegacyTx, ok := tx.(*EthLegacyHomesteadTxArgs) + require.True(t, ok) + + // Mangle R value + ethLegacyTx.R = big.Add(ethLegacyTx.R, big.NewInt(1)) + + expectedFromAddr, err := ParseEthAddress("0x32Be343B94f860124dC4fEe278FDCBD38C102D88") + require.NoError(t, err) + expectedFromFilecoinAddr, err := 
expectedFromAddr.ToFilecoinAddress() + require.NoError(t, err) + + senderAddr, err := ethLegacyTx.Sender() + require.NoError(t, err) + require.NotEqual(t, senderAddr, expectedFromFilecoinAddr, "Expected sender address to not match after mangling R value") + + // Mangle V value + ethLegacyTx.V = big.NewInt(1) + _, err = ethLegacyTx.Sender() + require.Error(t, err, "Expected error when V value is not 27 or 28") +} diff --git a/chain/types/ethtypes/eth_transactions.go b/chain/types/ethtypes/eth_transactions.go index a3b1d01502a..d1713248a6d 100644 --- a/chain/types/ethtypes/eth_transactions.go +++ b/chain/types/ethtypes/eth_transactions.go @@ -3,12 +3,12 @@ package ethtypes import ( "bytes" "encoding/binary" + "errors" "fmt" mathbig "math/big" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/crypto/sha3" - "golang.org/x/xerrors" "github.com/filecoin-project/go-address" gocrypto "github.com/filecoin-project/go-crypto" @@ -21,8 +21,50 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -const Eip1559TxType = 2 +const ( + EthLegacyTxType = 0x00 + EIP1559TxType = 0x02 +) + +const ( + EthEIP1559TxSignatureLen = 65 + EthLegacyHomesteadTxSignatureLen = 66 + EthLegacyHomesteadTxSignaturePrefix = 0x01 + EthLegacy155TxSignaturePrefix = 0x02 + EthLegacyHomesteadTxChainID = 0x00 +) + +var ( + EthLegacy155TxSignatureLen0 int + EthLegacy155TxSignatureLen1 int +) + +func init() { + EthLegacy155TxSignatureLen0 = calcEIP155TxSignatureLen(build.Eip155ChainId, 35) + EthLegacy155TxSignatureLen1 = calcEIP155TxSignatureLen(build.Eip155ChainId, 36) +} + +// EthTransaction defines the interface for Ethereum-like transactions. +// It provides methods to convert transactions to various formats, +// retrieve transaction details, and manipulate transaction signatures. 
+type EthTransaction interface { + Type() int + Sender() (address.Address, error) + Signature() (*typescrypto.Signature, error) + InitialiseSignature(sig typescrypto.Signature) error + ToUnsignedFilecoinMessage(from address.Address) (*types.Message, error) + ToRlpUnsignedMsg() ([]byte, error) + ToRlpSignedMsg() ([]byte, error) + TxHash() (EthHash, error) + ToVerifiableSignature(sig []byte) ([]byte, error) + ToEthTx(*types.SignedMessage) (EthTx, error) +} +// EthTx represents an Ethereum transaction structure, encapsulating fields that align with the standard Ethereum transaction components. +// This structure can represent both EIP-1559 transactions and legacy Homestead transactions: +// - In EIP-1559 transactions, the `GasPrice` field is set to nil/empty. +// - In legacy Homestead transactions, the `GasPrice` field is populated to specify the fee per unit of gas, while the `MaxFeePerGas` and `MaxPriorityFeePerGas` fields are set to nil/empty. +// Additionally, both the `ChainID` and the `Type` fields are set to 0 in legacy Homestead transactions to differentiate them from EIP-1559 transactions. 
type EthTx struct { ChainID EthUint64 `json:"chainId"` Nonce EthUint64 `json:"nonce"` @@ -36,189 +78,129 @@ type EthTx struct { Type EthUint64 `json:"type"` Input EthBytes `json:"input"` Gas EthUint64 `json:"gas"` - MaxFeePerGas EthBigInt `json:"maxFeePerGas"` - MaxPriorityFeePerGas EthBigInt `json:"maxPriorityFeePerGas"` + MaxFeePerGas *EthBigInt `json:"maxFeePerGas,omitempty"` + MaxPriorityFeePerGas *EthBigInt `json:"maxPriorityFeePerGas,omitempty"` + GasPrice *EthBigInt `json:"gasPrice,omitempty"` AccessList []EthHash `json:"accessList"` V EthBigInt `json:"v"` R EthBigInt `json:"r"` S EthBigInt `json:"s"` } -type EthTxArgs struct { - ChainID int `json:"chainId"` - Nonce int `json:"nonce"` - To *EthAddress `json:"to"` - Value big.Int `json:"value"` - MaxFeePerGas big.Int `json:"maxFeePerGas"` - MaxPriorityFeePerGas big.Int `json:"maxPriorityFeePerGas"` - GasLimit int `json:"gasLimit"` - Input []byte `json:"input"` - V big.Int `json:"v"` - R big.Int `json:"r"` - S big.Int `json:"s"` +func (tx *EthTx) GasFeeCap() (EthBigInt, error) { + if tx.GasPrice == nil && tx.MaxFeePerGas == nil { + return EthBigInt{}, fmt.Errorf("gas fee cap is not set") + } + if tx.MaxFeePerGas != nil { + return *tx.MaxFeePerGas, nil + } + return *tx.GasPrice, nil } -// EthTxFromSignedEthMessage does NOT populate: -// - BlockHash -// - BlockNumber -// - TransactionIndex -// - Hash -func EthTxFromSignedEthMessage(smsg *types.SignedMessage) (EthTx, error) { - // The from address is always an f410f address, never an ID or other address. - if !IsEthAddress(smsg.Message.From) { - return EthTx{}, xerrors.Errorf("sender must be an eth account, was %s", smsg.Message.From) +func (tx *EthTx) GasPremium() (EthBigInt, error) { + if tx.GasPrice == nil && tx.MaxPriorityFeePerGas == nil { + return EthBigInt{}, fmt.Errorf("gas premium is not set") } - // Probably redundant, but we might as well check. 
- if smsg.Signature.Type != typescrypto.SigTypeDelegated { - return EthTx{}, xerrors.Errorf("signature is not delegated type, is type: %d", smsg.Signature.Type) + if tx.MaxPriorityFeePerGas != nil { + return *tx.MaxPriorityFeePerGas, nil } - txArgs, err := EthTxArgsFromUnsignedEthMessage(&smsg.Message) - if err != nil { - return EthTx{}, xerrors.Errorf("failed to convert the unsigned message: %w", err) + return *tx.GasPrice, nil +} + +func EthTransactionFromSignedFilecoinMessage(smsg *types.SignedMessage) (EthTransaction, error) { + if smsg == nil { + return nil, errors.New("signed message is nil") } - r, s, v, err := RecoverSignature(smsg.Signature) - if err != nil { - return EthTx{}, xerrors.Errorf("failed to recover signature: %w", err) + // Ensure the signature type is delegated. + if smsg.Signature.Type != typescrypto.SigTypeDelegated { + return nil, fmt.Errorf("signature is not delegated type, is type: %d", smsg.Signature.Type) } - from, err := EthAddressFromFilecoinAddress(smsg.Message.From) + // Convert Filecoin address to Ethereum address. + _, err := EthAddressFromFilecoinAddress(smsg.Message.From) if err != nil { - // This should be impossible as we've already asserted that we have an EthAddress - // sender... 
- return EthTx{}, xerrors.Errorf("sender was not an eth account") + return nil, fmt.Errorf("sender was not an eth account") } - return EthTx{ - Nonce: EthUint64(txArgs.Nonce), - ChainID: EthUint64(txArgs.ChainID), - To: txArgs.To, - From: from, - Value: EthBigInt(txArgs.Value), - Type: Eip1559TxType, - Gas: EthUint64(txArgs.GasLimit), - MaxFeePerGas: EthBigInt(txArgs.MaxFeePerGas), - MaxPriorityFeePerGas: EthBigInt(txArgs.MaxPriorityFeePerGas), - AccessList: []EthHash{}, - V: v, - R: r, - S: s, - Input: txArgs.Input, - }, nil -} - -func EthTxArgsFromUnsignedEthMessage(msg *types.Message) (EthTxArgs, error) { - var ( - to *EthAddress - params []byte - err error - ) - - if msg.Version != 0 { - return EthTxArgs{}, xerrors.Errorf("unsupported msg version: %d", msg.Version) + // Extract Ethereum parameters and recipient from the message. + params, to, err := getEthParamsAndRecipient(&smsg.Message) + if err != nil { + return nil, fmt.Errorf("failed to parse input params and recipient: %w", err) } - if len(msg.Params) > 0 { - paramsReader := bytes.NewReader(msg.Params) - params, err = cbg.ReadByteArray(paramsReader, uint64(len(msg.Params))) - if err != nil { - return EthTxArgs{}, xerrors.Errorf("failed to read params byte array: %w", err) - } - if paramsReader.Len() != 0 { - return EthTxArgs{}, xerrors.Errorf("extra data found in params") - } - if len(params) == 0 { - return EthTxArgs{}, xerrors.Errorf("non-empty params encode empty byte array") - } + // Check for supported message version. 
+ if smsg.Message.Version != 0 { + return nil, fmt.Errorf("unsupported msg version: %d", smsg.Message.Version) } - if msg.To == builtintypes.EthereumAddressManagerActorAddr { - if msg.Method != builtintypes.MethodsEAM.CreateExternal { - return EthTxArgs{}, fmt.Errorf("unsupported EAM method") + // Determine the type of transaction based on the signature length + switch len(smsg.Signature.Data) { + case EthEIP1559TxSignatureLen: + tx := Eth1559TxArgs{ + ChainID: build.Eip155ChainId, + Nonce: int(smsg.Message.Nonce), + To: to, + Value: smsg.Message.Value, + Input: params, + MaxFeePerGas: smsg.Message.GasFeeCap, + MaxPriorityFeePerGas: smsg.Message.GasPremium, + GasLimit: int(smsg.Message.GasLimit), } - } else if msg.Method == builtintypes.MethodsEVM.InvokeContract { - addr, err := EthAddressFromFilecoinAddress(msg.To) - if err != nil { - return EthTxArgs{}, err + if err := tx.InitialiseSignature(smsg.Signature); err != nil { + return nil, fmt.Errorf("failed to initialise signature: %w", err) } - to = &addr - } else { - return EthTxArgs{}, - xerrors.Errorf("invalid methodnum %d: only allowed method is InvokeContract(%d)", - msg.Method, builtintypes.MethodsEVM.InvokeContract) - } - - return EthTxArgs{ - ChainID: build.Eip155ChainId, - Nonce: int(msg.Nonce), - To: to, - Value: msg.Value, - Input: params, - MaxFeePerGas: msg.GasFeeCap, - MaxPriorityFeePerGas: msg.GasPremium, - GasLimit: int(msg.GasLimit), - }, nil -} - -func (tx *EthTxArgs) ToUnsignedMessage(from address.Address) (*types.Message, error) { - if tx.ChainID != build.Eip155ChainId { - return nil, xerrors.Errorf("unsupported chain id: %d", tx.ChainID) - } - - var err error - var params []byte - if len(tx.Input) > 0 { - buf := new(bytes.Buffer) - if err = cbg.WriteByteArray(buf, tx.Input); err != nil { - return nil, xerrors.Errorf("failed to write input args: %w", err) + return &tx, nil + + case EthLegacyHomesteadTxSignatureLen, EthLegacy155TxSignatureLen0, EthLegacy155TxSignatureLen1: + legacyTx := 
&EthLegacyHomesteadTxArgs{ + Nonce: int(smsg.Message.Nonce), + To: to, + Value: smsg.Message.Value, + Input: params, + GasPrice: smsg.Message.GasFeeCap, + GasLimit: int(smsg.Message.GasLimit), } - params = buf.Bytes() - } - - var to address.Address - var method abi.MethodNum - // nil indicates the EAM, only CreateExternal is allowed - if tx.To == nil { - method = builtintypes.MethodsEAM.CreateExternal - to = builtintypes.EthereumAddressManagerActorAddr - } else { - method = builtintypes.MethodsEVM.InvokeContract - to, err = tx.To.ToFilecoinAddress() - if err != nil { - return nil, xerrors.Errorf("failed to convert To into filecoin addr: %w", err) + // Process based on the first byte of the signature + switch smsg.Signature.Data[0] { + case EthLegacyHomesteadTxSignaturePrefix: + if err := legacyTx.InitialiseSignature(smsg.Signature); err != nil { + return nil, fmt.Errorf("failed to initialise signature: %w", err) + } + return legacyTx, nil + case EthLegacy155TxSignaturePrefix: + tx := &EthLegacy155TxArgs{ + legacyTx: legacyTx, + } + if err := tx.InitialiseSignature(smsg.Signature); err != nil { + return nil, fmt.Errorf("failed to initialise signature: %w", err) + } + return tx, nil + default: + return nil, fmt.Errorf("unsupported legacy transaction; first byte of signature is %d", smsg.Signature.Data[0]) } - } - return &types.Message{ - Version: 0, - To: to, - From: from, - Nonce: uint64(tx.Nonce), - Value: tx.Value, - GasLimit: int64(tx.GasLimit), - GasFeeCap: tx.MaxFeePerGas, - GasPremium: tx.MaxPriorityFeePerGas, - Method: method, - Params: params, - }, nil + default: + return nil, fmt.Errorf("unsupported signature length") + } } -func (tx *EthTxArgs) ToSignedMessage() (*types.SignedMessage, error) { +func ToSignedFilecoinMessage(tx EthTransaction) (*types.SignedMessage, error) { from, err := tx.Sender() if err != nil { - return nil, xerrors.Errorf("failed to calculate sender: %w", err) + return nil, fmt.Errorf("failed to calculate sender: %w", err) } - 
unsignedMsg, err := tx.ToUnsignedMessage(from) + unsignedMsg, err := tx.ToUnsignedFilecoinMessage(from) if err != nil { - return nil, xerrors.Errorf("failed to convert to unsigned msg: %w", err) + return nil, fmt.Errorf("failed to convert to unsigned msg: %w", err) } siggy, err := tx.Signature() if err != nil { - return nil, xerrors.Errorf("failed to calculate signature: %w", err) + return nil, fmt.Errorf("failed to calculate signature: %w", err) } return &types.SignedMessage{ @@ -227,443 +209,386 @@ func (tx *EthTxArgs) ToSignedMessage() (*types.SignedMessage, error) { }, nil } -func (tx *EthTxArgs) HashedOriginalRlpMsg() ([]byte, error) { - msg, err := tx.ToRlpUnsignedMsg() - if err != nil { - return nil, err - } - - hasher := sha3.NewLegacyKeccak256() - hasher.Write(msg) - hash := hasher.Sum(nil) - return hash, nil -} - -func (tx *EthTxArgs) ToRlpUnsignedMsg() ([]byte, error) { - packed, err := tx.packTxFields() - if err != nil { - return nil, err +func ParseEthTransaction(data []byte) (EthTransaction, error) { + if len(data) == 0 { + return nil, fmt.Errorf("empty data") } - encoded, err := EncodeRLP(packed) - if err != nil { - return nil, err + switch data[0] { + case 1: + // EIP-2930 + return nil, fmt.Errorf("EIP-2930 transaction is not supported") + case EIP1559TxType: + // EIP-1559 + return parseEip1559Tx(data) + default: + if data[0] > 0x7f { + tx, err := parseLegacyTx(data) + if err != nil { + return nil, fmt.Errorf("failed to parse legacy transaction: %w", err) + } + return tx, nil + } } - return append([]byte{0x02}, encoded...), nil -} -func (tx *EthTx) ToEthTxArgs() EthTxArgs { - return EthTxArgs{ - ChainID: int(tx.ChainID), - Nonce: int(tx.Nonce), - To: tx.To, - Value: big.Int(tx.Value), - MaxFeePerGas: big.Int(tx.MaxFeePerGas), - MaxPriorityFeePerGas: big.Int(tx.MaxPriorityFeePerGas), - GasLimit: int(tx.Gas), - Input: tx.Input, - V: big.Int(tx.V), - R: big.Int(tx.R), - S: big.Int(tx.S), - } + return nil, fmt.Errorf("unsupported transaction type") } 
-func (tx *EthTx) TxHash() (EthHash, error) { - ethTxArgs := tx.ToEthTxArgs() - return (ðTxArgs).TxHash() +type methodInfo struct { + to address.Address + method abi.MethodNum + params []byte } -func (tx *EthTxArgs) TxHash() (EthHash, error) { - rlp, err := tx.ToRlpSignedMsg() - if err != nil { - return EmptyEthHash, err +func getFilecoinMethodInfo(recipient *EthAddress, input []byte) (*methodInfo, error) { + var params []byte + if len(input) > 0 { + buf := new(bytes.Buffer) + if err := cbg.WriteByteArray(buf, input); err != nil { + return nil, fmt.Errorf("failed to write input args: %w", err) + } + params = buf.Bytes() } - return EthHashFromTxBytes(rlp), nil -} - -func (tx *EthTxArgs) ToRlpSignedMsg() ([]byte, error) { - packed1, err := tx.packTxFields() - if err != nil { - return nil, err - } + var to address.Address + var method abi.MethodNum - packed2, err := tx.packSigFields() - if err != nil { - return nil, err + if recipient == nil { + // If recipient is nil, use Ethereum Address Manager Actor and CreateExternal method + method = builtintypes.MethodsEAM.CreateExternal + to = builtintypes.EthereumAddressManagerActorAddr + } else { + // Otherwise, use InvokeContract method and convert EthAddress to Filecoin address + method = builtintypes.MethodsEVM.InvokeContract + var err error + to, err = recipient.ToFilecoinAddress() + if err != nil { + return nil, fmt.Errorf("failed to convert EthAddress to Filecoin address: %w", err) + } } - encoded, err := EncodeRLP(append(packed1, packed2...)) - if err != nil { - return nil, err - } - return append([]byte{0x02}, encoded...), nil + return &methodInfo{ + to: to, + method: method, + params: params, + }, nil } -func (tx *EthTxArgs) packTxFields() ([]interface{}, error) { - chainId, err := formatInt(tx.ChainID) - if err != nil { - return nil, err - } - - nonce, err := formatInt(tx.Nonce) +func packSigFields(v, r, s big.Int) ([]interface{}, error) { + rr, err := formatBigInt(r) if err != nil { return nil, err } - 
maxPriorityFeePerGas, err := formatBigInt(tx.MaxPriorityFeePerGas) + ss, err := formatBigInt(s) if err != nil { return nil, err } - maxFeePerGas, err := formatBigInt(tx.MaxFeePerGas) + vv, err := formatBigInt(v) if err != nil { return nil, err } - gasLimit, err := formatInt(tx.GasLimit) - if err != nil { - return nil, err - } + res := []interface{}{vv, rr, ss} + return res, nil +} - value, err := formatBigInt(tx.Value) - if err != nil { - return nil, err +func padLeadingZeros(data []byte, length int) []byte { + if len(data) >= length { + return data } + zeros := make([]byte, length-len(data)) + return append(zeros, data...) +} - res := []interface{}{ - chainId, - nonce, - maxPriorityFeePerGas, - maxFeePerGas, - gasLimit, - formatEthAddr(tx.To), - value, - tx.Input, - []interface{}{}, // access list +func removeLeadingZeros(data []byte) []byte { + firstNonZeroIndex := len(data) + for i, b := range data { + if b > 0 { + firstNonZeroIndex = i + break + } } - return res, nil + return data[firstNonZeroIndex:] } -func (tx *EthTxArgs) packSigFields() ([]interface{}, error) { - r, err := formatBigInt(tx.R) +func formatInt(val int) ([]byte, error) { + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.BigEndian, int64(val)) if err != nil { return nil, err } + return removeLeadingZeros(buf.Bytes()), nil +} - s, err := formatBigInt(tx.S) - if err != nil { - return nil, err +func formatEthAddr(addr *EthAddress) []byte { + if addr == nil { + return nil } + return addr[:] +} - v, err := formatBigInt(tx.V) +func formatBigInt(val big.Int) ([]byte, error) { + b, err := val.Bytes() if err != nil { return nil, err } - - res := []interface{}{v, r, s} - return res, nil + return removeLeadingZeros(b), nil } -func (tx *EthTxArgs) Signature() (*typescrypto.Signature, error) { - r := tx.R.Int.Bytes() - s := tx.S.Int.Bytes() - v := tx.V.Int.Bytes() - - sig := append([]byte{}, padLeadingZeros(r, 32)...) - sig = append(sig, padLeadingZeros(s, 32)...) 
- if len(v) == 0 { - sig = append(sig, 0) - } else { - sig = append(sig, v[0]) +func parseInt(v interface{}) (int, error) { + data, ok := v.([]byte) + if !ok { + return 0, fmt.Errorf("cannot parse interface to int: input is not a byte array") } - - if len(sig) != 65 { - return nil, fmt.Errorf("signature is not 65 bytes") + if len(data) == 0 { + return 0, nil } - return &typescrypto.Signature{ - Type: typescrypto.SigTypeDelegated, Data: sig, - }, nil -} - -func (tx *EthTxArgs) Sender() (address.Address, error) { - msg, err := tx.ToRlpUnsignedMsg() - if err != nil { - return address.Undef, err + if len(data) > 8 { + return 0, fmt.Errorf("cannot parse interface to int: length is more than 8 bytes") } - - hasher := sha3.NewLegacyKeccak256() - hasher.Write(msg) - hash := hasher.Sum(nil) - - sig, err := tx.Signature() - if err != nil { - return address.Undef, err + var value int64 + r := bytes.NewReader(append(make([]byte, 8-len(data)), data...)) + if err := binary.Read(r, binary.BigEndian, &value); err != nil { + return 0, fmt.Errorf("cannot parse interface to EthUint64: %w", err) } + return int(value), nil +} - pubk, err := gocrypto.EcRecover(hash, sig.Data) - if err != nil { - return address.Undef, err +func parseBigInt(v interface{}) (big.Int, error) { + data, ok := v.([]byte) + if !ok { + return big.Zero(), fmt.Errorf("cannot parse interface to big.Int: input is not a byte array") } - - ethAddr, err := EthAddressFromPubKey(pubk) - if err != nil { - return address.Undef, err + if len(data) == 0 { + return big.Zero(), nil } + var b mathbig.Int + b.SetBytes(data) + return big.NewFromGo(&b), nil +} - ea, err := CastEthAddress(ethAddr) - if err != nil { - return address.Undef, err +func parseBytes(v interface{}) ([]byte, error) { + val, ok := v.([]byte) + if !ok { + return nil, fmt.Errorf("cannot parse interface into bytes: input is not a byte array") } - - return ea.ToFilecoinAddress() + return val, nil } -func RecoverSignature(sig typescrypto.Signature) (r, s, v 
EthBigInt, err error) { - if sig.Type != typescrypto.SigTypeDelegated { - return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("RecoverSignature only supports Delegated signature") +func parseEthAddr(v interface{}) (*EthAddress, error) { + b, err := parseBytes(v) + if err != nil { + return nil, err } - - if len(sig.Data) != 65 { - return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("signature should be 65 bytes long, but got %d bytes", len(sig.Data)) + if len(b) == 0 { + return nil, nil } - - r_, err := parseBigInt(sig.Data[0:32]) + addr, err := CastEthAddress(b) if err != nil { - return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("cannot parse r into EthBigInt") + return nil, err } + return &addr, nil +} - s_, err := parseBigInt(sig.Data[32:64]) - if err != nil { - return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("cannot parse s into EthBigInt") +func getEthParamsAndRecipient(msg *types.Message) (params []byte, to *EthAddress, err error) { + if len(msg.Params) > 0 { + paramsReader := bytes.NewReader(msg.Params) + var err error + params, err = cbg.ReadByteArray(paramsReader, uint64(len(msg.Params))) + if err != nil { + return nil, nil, fmt.Errorf("failed to read params byte array: %w", err) + } + if paramsReader.Len() != 0 { + return nil, nil, fmt.Errorf("extra data found in params") + } + if len(params) == 0 { + return nil, nil, fmt.Errorf("non-empty params encode empty byte array") + } } - v_, err := parseBigInt([]byte{sig.Data[64]}) - if err != nil { - return EthBigIntZero, EthBigIntZero, EthBigIntZero, fmt.Errorf("cannot parse v into EthBigInt") + if msg.To == builtintypes.EthereumAddressManagerActorAddr { + if msg.Method != builtintypes.MethodsEAM.CreateExternal { + return nil, nil, fmt.Errorf("unsupported EAM method") + } + } else if msg.Method == builtintypes.MethodsEVM.InvokeContract { + addr, err := EthAddressFromFilecoinAddress(msg.To) + if err != nil { + return nil, nil, err + } + to = &addr + } else { + 
return nil, nil, + fmt.Errorf("invalid methodnum %d: only allowed method is InvokeContract(%d) or CreateExternal(%d)", + msg.Method, builtintypes.MethodsEVM.InvokeContract, builtintypes.MethodsEAM.CreateExternal) } - return EthBigInt(r_), EthBigInt(s_), EthBigInt(v_), nil + return params, to, nil } -func parseEip1559Tx(data []byte) (*EthTxArgs, error) { - if data[0] != 2 { - return nil, fmt.Errorf("not an EIP-1559 transaction: first byte is not 2") +func parseLegacyTx(data []byte) (EthTransaction, error) { + if data[0] <= 0x7f { + return nil, fmt.Errorf("not a legacy eth transaction") } - d, err := DecodeRLP(data[1:]) + d, err := DecodeRLP(data) if err != nil { return nil, err } decoded, ok := d.([]interface{}) if !ok { - return nil, fmt.Errorf("not an EIP-1559 transaction: decoded data is not a list") + return nil, fmt.Errorf("not a Legacy transaction: decoded data is not a list") } - if len(decoded) != 12 { - return nil, fmt.Errorf("not an EIP-1559 transaction: should have 12 elements in the rlp list") + if len(decoded) != 9 { + return nil, fmt.Errorf("not a Legacy transaction: should have 9 elements in the rlp list") } - chainId, err := parseInt(decoded[0]) + nonce, err := parseInt(decoded[0]) if err != nil { return nil, err } - nonce, err := parseInt(decoded[1]) + gasPrice, err := parseBigInt(decoded[1]) if err != nil { return nil, err } - maxPriorityFeePerGas, err := parseBigInt(decoded[2]) + gasLimit, err := parseInt(decoded[2]) if err != nil { return nil, err } - maxFeePerGas, err := parseBigInt(decoded[3]) + to, err := parseEthAddr(decoded[3]) if err != nil { return nil, err } - gasLimit, err := parseInt(decoded[4]) + value, err := parseBigInt(decoded[4]) if err != nil { return nil, err } - to, err := parseEthAddr(decoded[5]) - if err != nil { - return nil, err + input, ok := decoded[5].([]byte) + if !ok { + return nil, fmt.Errorf("input is not a byte slice") } - value, err := parseBigInt(decoded[6]) + v, err := parseBigInt(decoded[6]) if err != nil { 
return nil, err } - input, err := parseBytes(decoded[7]) + r, err := parseBigInt(decoded[7]) if err != nil { return nil, err } - accessList, ok := decoded[8].([]interface{}) - if !ok || (ok && len(accessList) != 0) { - return nil, fmt.Errorf("access list should be an empty list") - } - - r, err := parseBigInt(decoded[10]) + s, err := parseBigInt(decoded[8]) if err != nil { return nil, err } - s, err := parseBigInt(decoded[11]) - if err != nil { - return nil, err + tx := &EthLegacyHomesteadTxArgs{ + Nonce: nonce, + GasPrice: gasPrice, + GasLimit: gasLimit, + To: to, + Value: value, + Input: input, + V: v, + R: r, + S: s, } - v, err := parseBigInt(decoded[9]) - if err != nil { - return nil, err + chainId := deriveEIP155ChainId(v) + if chainId.Equals(big.NewInt(0)) { + // This is a legacy Homestead transaction + if !v.Equals(big.NewInt(27)) && !v.Equals(big.NewInt(28)) { + return nil, fmt.Errorf("legacy homestead transactions only support 27 or 28 for v, got %d", v.Uint64()) + } + return tx, nil } - // EIP-1559 and EIP-2930 transactions only support 0 or 1 for v - // Legacy and EIP-155 transactions support other values - // https://github.com/ethers-io/ethers.js/blob/56fabe987bb8c1e4891fdf1e5d3fe8a4c0471751/packages/transactions/src.ts/index.ts#L333 - if !v.Equals(big.NewInt(0)) && !v.Equals(big.NewInt(1)) { - return nil, fmt.Errorf("EIP-1559 transactions only support 0 or 1 for v") + // This is a EIP-155 transaction -> ensure chainID protection + if err := validateEIP155ChainId(v); err != nil { + return nil, fmt.Errorf("failed to validate EIP155 chain id: %w", err) } - args := EthTxArgs{ - ChainID: chainId, - Nonce: nonce, - To: to, - MaxPriorityFeePerGas: maxPriorityFeePerGas, - MaxFeePerGas: maxFeePerGas, - GasLimit: gasLimit, - Value: value, - Input: input, - V: v, - R: r, - S: s, - } - return &args, nil + return &EthLegacy155TxArgs{ + legacyTx: tx, + }, nil } -func ParseEthTxArgs(data []byte) (*EthTxArgs, error) { - if len(data) == 0 { - return nil, 
fmt.Errorf("empty data") - } - - if data[0] > 0x7f { - // legacy transaction - return nil, fmt.Errorf("legacy transaction is not supported") - } - - if data[0] == 1 { - // EIP-2930 - return nil, fmt.Errorf("EIP-2930 transaction is not supported") - } - - if data[0] == Eip1559TxType { - // EIP-1559 - return parseEip1559Tx(data) - } - - return nil, fmt.Errorf("unsupported transaction type") +type RlpPackable interface { + packTxFields() ([]interface{}, error) } -func padLeadingZeros(data []byte, length int) []byte { - if len(data) >= length { - return data - } - zeros := make([]byte, length-len(data)) - return append(zeros, data...) -} - -func removeLeadingZeros(data []byte) []byte { - firstNonZeroIndex := len(data) - for i, b := range data { - if b > 0 { - firstNonZeroIndex = i - break - } +func toRlpUnsignedMsg(tx RlpPackable) ([]byte, error) { + packedFields, err := tx.packTxFields() + if err != nil { + return nil, err } - return data[firstNonZeroIndex:] -} - -func formatInt(val int) ([]byte, error) { - buf := new(bytes.Buffer) - err := binary.Write(buf, binary.BigEndian, int64(val)) + encoded, err := EncodeRLP(packedFields) if err != nil { return nil, err } - return removeLeadingZeros(buf.Bytes()), nil + return encoded, nil } -func formatEthAddr(addr *EthAddress) []byte { - if addr == nil { - return nil +func toRlpSignedMsg(tx RlpPackable, V, R, S big.Int) ([]byte, error) { + packed1, err := tx.packTxFields() + if err != nil { + return nil, err } - return addr[:] -} -func formatBigInt(val big.Int) ([]byte, error) { - b, err := val.Bytes() + packed2, err := packSigFields(V, R, S) if err != nil { return nil, err } - return removeLeadingZeros(b), nil -} -func parseInt(v interface{}) (int, error) { - data, ok := v.([]byte) - if !ok { - return 0, fmt.Errorf("cannot parse interface to int: input is not a byte array") - } - if len(data) == 0 { - return 0, nil - } - if len(data) > 8 { - return 0, fmt.Errorf("cannot parse interface to int: length is more than 8 bytes") - 
} - var value int64 - r := bytes.NewReader(append(make([]byte, 8-len(data)), data...)) - if err := binary.Read(r, binary.BigEndian, &value); err != nil { - return 0, fmt.Errorf("cannot parse interface to EthUint64: %w", err) + encoded, err := EncodeRLP(append(packed1, packed2...)) + if err != nil { + return nil, fmt.Errorf("failed to encode rlp signed msg: %w", err) } - return int(value), nil + return encoded, nil } -func parseBigInt(v interface{}) (big.Int, error) { - data, ok := v.([]byte) - if !ok { - return big.Zero(), fmt.Errorf("cannot parse interface to big.Int: input is not a byte array") +func sender(tx EthTransaction) (address.Address, error) { + msg, err := tx.ToRlpUnsignedMsg() + if err != nil { + return address.Undef, fmt.Errorf("failed to get rlp unsigned msg: %w", err) } - if len(data) == 0 { - return big.Zero(), nil + + hasher := sha3.NewLegacyKeccak256() + hasher.Write(msg) + hash := hasher.Sum(nil) + + sig, err := tx.Signature() + if err != nil { + return address.Undef, fmt.Errorf("failed to get signature: %w", err) } - var b mathbig.Int - b.SetBytes(data) - return big.NewFromGo(&b), nil -} -func parseBytes(v interface{}) ([]byte, error) { - val, ok := v.([]byte) - if !ok { - return nil, fmt.Errorf("cannot parse interface into bytes: input is not a byte array") + sigData, err := tx.ToVerifiableSignature(sig.Data) + if err != nil { + return address.Undef, fmt.Errorf("failed to get verifiable signature: %w", err) } - return val, nil -} -func parseEthAddr(v interface{}) (*EthAddress, error) { - b, err := parseBytes(v) + pubk, err := gocrypto.EcRecover(hash, sigData) if err != nil { - return nil, err + return address.Undef, fmt.Errorf("failed to recover pubkey: %w", err) } - if len(b) == 0 { - return nil, nil + + ethAddr, err := EthAddressFromPubKey(pubk) + if err != nil { + return address.Undef, fmt.Errorf("failed to get eth address from pubkey: %w", err) } - addr, err := CastEthAddress(b) + + ea, err := CastEthAddress(ethAddr) if err != nil { - 
return nil, err + return address.Undef, fmt.Errorf("failed to cast eth address: %w", err) } - return &addr, nil + + return ea.ToFilecoinAddress() } diff --git a/chain/types/ethtypes/eth_transactions_test.go b/chain/types/ethtypes/eth_transactions_test.go index 68abc55dd49..21e1f047542 100644 --- a/chain/types/ethtypes/eth_transactions_test.go +++ b/chain/types/ethtypes/eth_transactions_test.go @@ -1,251 +1,167 @@ package ethtypes import ( - "bytes" - "encoding/hex" - "encoding/json" - "fmt" "testing" "github.com/stretchr/testify/require" - "golang.org/x/crypto/sha3" "github.com/filecoin-project/go-address" - gocrypto "github.com/filecoin-project/go-crypto" - actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" builtintypes "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v10/evm" - init10 "github.com/filecoin-project/go-state-types/builtin/v10/init" - crypto1 "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/lib/sigs" - _ "github.com/filecoin-project/lotus/lib/sigs/delegated" + "github.com/filecoin-project/lotus/chain/types" ) -type TxTestcase struct { - TxJSON string - NosigTx string - Input EthBytes - Output EthTxArgs -} - -func TestTxArgs(t *testing.T) { - testcases, err := prepareTxTestcases() - require.Nil(t, err) - require.NotEmpty(t, testcases) - - for i, tc := range testcases { - comment := fmt.Sprintf("case %d: \n%s\n%s", i, tc.TxJSON, hex.EncodeToString(tc.Input)) - - // parse txargs - txArgs, err := ParseEthTxArgs(tc.Input) - require.NoError(t, err, comment) - - msgRecovered, err := txArgs.ToRlpUnsignedMsg() - require.NoError(t, err, comment) - require.Equal(t, tc.NosigTx, "0x"+hex.EncodeToString(msgRecovered), comment) - - // verify signatures - from, err := txArgs.Sender() - require.NoError(t, 
err, comment) - - smsg, err := txArgs.ToSignedMessage() - require.NoError(t, err, comment) - - err = sigs.Verify(&smsg.Signature, from, msgRecovered) - require.NoError(t, err, comment) - - // verify data - require.Equal(t, tc.Output.ChainID, txArgs.ChainID, comment) - require.Equal(t, tc.Output.Nonce, txArgs.Nonce, comment) - require.Equal(t, tc.Output.To, txArgs.To, comment) - } -} - -func TestSignatures(t *testing.T) { - testcases := []struct { - RawTx string - ExpectedR string - ExpectedS string - ExpectedV string - ExpectErr bool - }{ - { - "0x02f8598401df5e76028301d69083086a5e835532dd808080c080a0457e33227ac7ceee2ef121755e26b872b6fb04221993f9939349bb7b0a3e1595a02d8ef379e1d2a9e30fa61c92623cc9ed72d80cf6a48cfea341cb916bcc0a81bc", - `"0x457e33227ac7ceee2ef121755e26b872b6fb04221993f9939349bb7b0a3e1595"`, - `"0x2d8ef379e1d2a9e30fa61c92623cc9ed72d80cf6a48cfea341cb916bcc0a81bc"`, - `"0x0"`, - false, - }, - { - "0x02f8598401df5e76038301d69083086a5e835532dd808080c001a012a232866dcb0671eb0ddc01fb9c01d6ef384ec892bb29691ed0d2d293052ddfa052a6ae38c6139930db21a00eee2a4caced9a6500991b823d64ec664d003bc4b1", - `"0x12a232866dcb0671eb0ddc01fb9c01d6ef384ec892bb29691ed0d2d293052ddf"`, - `"0x52a6ae38c6139930db21a00eee2a4caced9a6500991b823d64ec664d003bc4b1"`, - `"0x1"`, - false, - }, - { - "0x00", - `""`, - `""`, - `""`, - true, - }, - } - - for _, tc := range testcases { - tx, err := ParseEthTxArgs(mustDecodeHex(tc.RawTx)) - if tc.ExpectErr { - require.Error(t, err) - continue - } - require.Nil(t, err) - - sig, err := tx.Signature() - require.Nil(t, err) - - r, s, v, err := RecoverSignature(*sig) - require.Nil(t, err) - - marshaledR, err := r.MarshalJSON() - require.Nil(t, err) - - marshaledS, err := s.MarshalJSON() - require.Nil(t, err) - - marshaledV, err := v.MarshalJSON() - require.Nil(t, err) - - require.Equal(t, tc.ExpectedR, string(marshaledR)) - require.Equal(t, tc.ExpectedS, string(marshaledS)) - require.Equal(t, tc.ExpectedV, string(marshaledV)) - } -} - -func 
TestTransformParams(t *testing.T) { - constructorParams, err := actors.SerializeParams(&evm.ConstructorParams{ - Initcode: mustDecodeHex("0x1122334455"), - }) - require.Nil(t, err) - - evmActorCid, ok := actors.GetActorCodeID(actorstypes.Version10, "reward") - require.True(t, ok) - - params, err := actors.SerializeParams(&init10.ExecParams{ - CodeCID: evmActorCid, - ConstructorParams: constructorParams, - }) - require.Nil(t, err) +func TestEthTransactionFromSignedFilecoinMessage(t *testing.T) { + eip1559sig := make([]byte, 65) + eip1559sig[0] = 1 - var exec init10.ExecParams - reader := bytes.NewReader(params) - err1 := exec.UnmarshalCBOR(reader) - require.Nil(t, err1) + legacySig := make([]byte, 66) + legacySig[0] = 1 + legacySig[65] = 27 - var evmParams evm.ConstructorParams - reader1 := bytes.NewReader(exec.ConstructorParams) - err1 = evmParams.UnmarshalCBOR(reader1) - require.Nil(t, err1) - - require.Equal(t, mustDecodeHex("0x1122334455"), evmParams.Initcode) -} - -func TestEcRecover(t *testing.T) { - rHex := "0x479ff7fa64cf8bf641eb81635d1e8a698530d2f219951d234539e6d074819529" - sHex := "0x4b6146d27be50cdbb2853ba9a42f207af8d730272f1ebe9c9a78aeef1d6aa924" - fromHex := "0x3947D223fc5415f43ea099866AB62B1d4D33814D" - v := byte(0) - - msgHex := "0x02f1030185012a05f2008504a817c800825208942b87d1cb599bc2a606db9a0169fcec96af04ad3a880de0b6b3a764000080c0" - pubKeyHex := "0x048362749392a0e192eff600d21155236c5a0648d300a8e0e44d8617712c7c96384c75825dc5c7595df2a5005fd8a0f7c809119fb9ab36403ed712244fc329348e" - - msg := mustDecodeHex(msgHex) - pubKey := mustDecodeHex(pubKeyHex) - r := mustDecodeHex(rHex) - s := mustDecodeHex(sHex) - from := mustDecodeHex(fromHex) - - sig := append(r, s...) 
- sig = append(sig, v) - require.Equal(t, 65, len(sig)) - - sha := sha3.NewLegacyKeccak256() - sha.Write(msg) - h := sha.Sum(nil) - - pubk, err := gocrypto.EcRecover(h, sig) - require.Nil(t, err) - require.Equal(t, pubKey, pubk) - - sha.Reset() - sha.Write(pubk[1:]) - h = sha.Sum(nil) - h = h[len(h)-20:] - - require.Equal(t, from, h) -} - -func TestDelegatedSigner(t *testing.T) { - rHex := "0xcf1fa52fae9154ba21d67aeca9b42adfe186eb9e426c441051a8473efd190848" - sHex := "0x0e6c8c79ffaf35fb8f136c8cf6c5656f1f3befad21f2644321aa6dba58d68737" - v := byte(0) - - msgHex := "0x02f08401df5e76038502540be400843b9aca008398968094ff000000000000000000000000000000000003f2832dc6c080c0" pubKeyHex := "0x04cfecc0520d906cbfea387759246e89d85e2998843e56ad1c41de247ce10b3e4c453aa73c8de13c178d94461b6fa3f8b6f74406ce43d2fbab6992d0b283394242" - - msg := mustDecodeHex(msgHex) pubk := mustDecodeHex(pubKeyHex) - r := mustDecodeHex(rHex) - s := mustDecodeHex(sHex) - addrHash, err := EthAddressFromPubKey(pubk) require.NoError(t, err) - from, err := address.NewDelegatedAddress(builtintypes.EthereumAddressManagerActorID, addrHash) require.NoError(t, err) - sig := append(r, s...) 
- sig = append(sig, v) - require.Equal(t, 65, len(sig)) - - signature := &crypto1.Signature{ - Type: crypto1.SigTypeDelegated, - Data: sig, - } - - err = sigs.Verify(signature, from, msg) + fromEth, err := EthAddressFromFilecoinAddress(from) require.NoError(t, err) -} -func prepareTxTestcases() ([]TxTestcase, error) { - tcstr := `[{"input":"0x02f86282013a8080808094ff000000000000000000000000000000000003ec8080c080a0f411a73e33523b40c1a916e79e67746bd01a4a4fb4ecfa87b441375a215ddfb4a0551692c1553574fab4c227ca70cb1c121dc3a2ef82179a9c984bd7acc0880a38","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec8080c001a0ed75a56e365c88479bf3f60251a2dd47ae181f1a3d95724581a3f648487b4396a046628bb9734edf4b4c455f2bbd351e43c466f315272cd1927f2c55d9b52e058b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86282013a8080808094ff000000000000000000000000000000000003ec8080c080a0f411a73e33523b40c1a916e79e67746bd01a4a4fb4ecfa87b441375a215ddfb4a0551692c1553574fab4c227ca70cb1c121dc3a2ef82179a9c984bd7acc0880a38","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec8080c001a0ed75a56e365c88479bf3f60251a2dd47ae181f1a3d95724581a3f648487b4396a0
46628bb9734edf4b4c455f2bbd351e43c466f315272cd1927f2c55d9b52e058b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88682013a8080808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0706d871013403cf8b965dfa7f2be5a4d185d746da45b21d5a67c667c26d255d6a02e68a14f386aa325ce8e82d30405107d53103d038cf20e40af961ef3a3963608","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84382013a8080808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88782013a81c880808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0df137d0a6733354b2f2419a4ea5fe77d333deca28b2fe091d76190b51c2bae73a0232cbf9c29b8840cbf104ff77360fbf3ca4acda29b5e230636e19ac253ad92de","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84482013a81c880808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec8080c001a03a2880cc65e88d5320067f502a0ffda72111d01f0ebeeea9fbeb812e457aa0f9a020c08483b104dbfbbbffffedc3acdbe8245ca
6daf97c0dbab843d747e587d625","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec8080c001a03427daf1639de6bf1b948abeab765b0a6a9170cc6a16d263c71c859f78916b03a01bbbb824b9953b5eb9f3098b4358a7ebb78f3358866eed997de66350ae4c9475","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec8080c001a03a2880cc65e88d5320067f502a0ffda72111d01f0ebeeea9fbeb812e457aa0f9a020c08483b104dbfbbbffffedc3acdbe8245ca6daf97c0dbab843d747e587d625","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec8080c001a03427daf1639de6bf1b948abeab765b0a6a9170cc6a16d263c71c859f78916b03a01bbbb824b9953b5eb9f3098b4358a7ebb78f3358866eed997de66350ae4c9475","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88882013a808082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff000
00000000000000000000000000000000064c001a0b9ebc36653a4800816f71ceacf93a1ee601a136916a3476ea9073a9a55ff026aa0647665249b12e8d1d1773b91844588ed70f65c91bc088ccb259ec0f0a24330d5","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a808082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c88082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0122dd8468dbd34e111e1a5ea1997199be633aa3bc9c1a7ee27dc3a8eda39c29da07cb99cd28ac67f55e507a8b8ef5b931c56cacf79273a4a2969a004a4b4a2864a","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c88082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec8080c080a0c1d020df63cb6db76e3a27a60ba0500a3cdd30f9f47b08733009dc8d610ea29ba05cbafb4c223417526ded0b02b8eb66a73535386d0e62da0e20f3641b532aa406","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec8080c080a090e30d32c6cd3f1ba2109b6a9f1c9fffc50b96
a934192edf98adc086299e410ba057db0c136436de2e907942bdaad8e0113cf576f250b336ab652ef094c260dae6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec8080c080a0c1d020df63cb6db76e3a27a60ba0500a3cdd30f9f47b08733009dc8d610ea29ba05cbafb4c223417526ded0b02b8eb66a73535386d0e62da0e20f3641b532aa406","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec8080c080a090e30d32c6cd3f1ba2109b6a9f1c9fffc50b96a934192edf98adc086299e410ba057db0c136436de2e907942bdaad8e0113cf576f250b336ab652ef094c260dae6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88882013a8082ea60808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a016e3f30a612fc802bb64b765325ecf78f2769b879a9acf62f07669f9723335d6a0781bb3444a73819f28233f1eebf8c3a4de288842fd73c2e05a7a7b0c288d5b25","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":31
4}","nosigTx":"0x02f84582013a8082ea60808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c882ea60808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0b652a447bdcdd1906ed86406ee543ee06023e4f762784c1d3aaf4c3bd85c6a17a0368ae9995e15258f14b74f937e97140a659d052d341674be0c24452257b56b30","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c882ea60808094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea6082ea608094ff000000000000000000000000000000000003ec8080c001a0b1411f337b69609a256c0e76c57ccf4af87e977c98fd2a889f29281bf623cab4a049bec0fb4773aed870bae9c1cdf1ee398c498f0b436dcd19cae588b4ecd8bdf2","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c080a00b845fec9c96bf593c3501753764e14867d3f5d4bd02051e49329b6810d6513ea070d046e5b38c18c542594b328f02345a8f34ab05fd00db33974f914f7ae31c63","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86682013a8082ea6082e
a608094ff000000000000000000000000000000000003ec8080c001a0b1411f337b69609a256c0e76c57ccf4af87e977c98fd2a889f29281bf623cab4a049bec0fb4773aed870bae9c1cdf1ee398c498f0b436dcd19cae588b4ecd8bdf2","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c080a00b845fec9c96bf593c3501753764e14867d3f5d4bd02051e49329b6810d6513ea070d046e5b38c18c542594b328f02345a8f34ab05fd00db33974f914f7ae31c63","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88a82013a8082ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a02d8215d8408d2f4b83a2e68f4aad6fe5dee97d7ef6a43b02ec413ead2215ac80a0641a43cebd6905e3e324c0dd06585d5ffc9b971b519045999c48e31db7aa7f9d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88a82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0da68784e191ce0806527d389f84b5d15bed3908e1c2cc0d8f0cea7a29eb0db
a39f231a0b438b7d0f0f57292c68dc174d4ee6df7add933ab4e0b3789f597a7d3b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea6082ea608094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec8080c080a04c97162e2d2ab508116a23c522fd816ecd9cb091d4c288afe45c37ee3a8dde34a06ebf67ff15b74d65c276340aaebde8e6ebb8da0d3bbab43deffac8eb1e6a0630","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a80808082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c080a0d503d409e667c2876ab9e420854cecce4c0092985855234be07f270bfcf3ed4aa07a40deecc8a4448d4dc0e2014b4b23ac5721409c62bffa05aee6938d8447f72d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec8080c080a04c97162e2d2ab508116a23c522fd816ecd9cb091d4c288afe45c37ee3a8dde34a06ebf67ff15b74d65c276340aaebde8e6ebb8da0d3bbab43deffac8eb1e6a0630","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0
x02e182013a80808082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c080a0d503d409e667c2876ab9e420854cecce4c0092985855234be07f270bfcf3ed4aa07a40deecc8a4448d4dc0e2014b4b23ac5721409c62bffa05aee6938d8447f72d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88882013a80808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a059aecc1d365ee0dc56a577d162f04c0912a5c5b62f889cff1acc706ac17a4489a017209b3ec43a10a40c5863a2b7a1ee823380ad42697a5f7d5f537c230583a4c7","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a80808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c8808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0dc1eb40f93e311f3f9a94d8a695db2bbb38973ce097121875885e4bc54f18152a0075da0bd405bb4f5c69034daaf8f40052b941fae5b9f3b8df218d80fb4d7ea99","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c8808082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb
4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a03d392fd5e83c64554907a55204572aaeec6ffab25f2c73655c6a22344fa02a14a03b9ae94b7dc21108db6dda65125ecaff844f8f43f483bed35f32f6d5d530fe9f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c001a0405e8a430ef6ad4c3403150776af08c255b6f6fbe278d194f88517733c816caca0364203b5bca7953dd863d4cf90c0a77b499ef4a3d5831c4fdf33926c31709c4f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a03d392fd5e83c64554907a55204572aaeec6ffab25f2c73655c6a22344fa02a14a03b9ae94b7dc21108db6dda65125ecaff844f8f43f483bed35f32f6d5d530fe9f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c001a0405e8a430ef6ad4c3403150776af08c255b6f6fbe278d194f88517733c816caca0364203b5bca7953dd863d4cf90c0a77b499ef4a3d5831c4fdf33926c31709c4f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\"
:\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88a82013a808082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a083cf6701aee00872946b6550c059f028f72e3052acb8cc9c25b830ace860e046a03fd969d73e995d43896659f94d3956a17da18451050349e7db6f7881f8c057d3","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a808082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c5a545f2d94e719068d9a43b01879bcb46b56e236dd378dd26ef3b8e4ec8314aa04024b9936960b9b156405e4f3e0b6562518df8778324a927381e380b23f47fb8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c080a0aa406ec7f4901a1777e44b975ff41603b9d46257efdc1ca904a3e7890f2b020ea03bda5c785182cfa2d9f9b7a54f194cd08b9d0f913069a4514ff21e8fa0ef3850","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"ga
sLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c080a089fc465c24b4bad898cf900f585eddab6d40189e8d19746da76597f86fbadf51a005732ffa2ebac36646afab9105540b543f74a5c91b441834a2b1930815c2ccc8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c080a0aa406ec7f4901a1777e44b975ff41603b9d46257efdc1ca904a3e7890f2b020ea03bda5c785182cfa2d9f9b7a54f194cd08b9d0f913069a4514ff21e8fa0ef3850","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86782013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c080a089fc465c24b4bad898cf900f585eddab6d40189e8d19746da76597f86fbadf51a005732ffa2ebac36646afab9105540b543f74a5c91b441834a2b1930815c2ccc8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88a82013a8082ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a09d9a8ee802486b826348a7634
6987b3e7331d70ef0c0257ff976ceebef1141a2a07d97d14ed877c16bd932f08a67c374e773ee3337d512ff8241c8d78566a04d46","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a024ad1ec1578f51beb2b574507bda7691a486cdbc9c22add01ad4c1f686beb567a048445e0fe8945b8052e5e87139690c0615a11c52503b226cf23610c999eada40","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea608082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a06b382fcbe48de85615ff6e2dcc0c84021beb4abc527878accd36c9c77af84ba8a06a07d34a6896b270538525cb14b0856ceb442714fa85e4c9ee36dedf638935f9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a0ba2586cfb3323fd0f9d7bb38bf9948758a52f156bda66f7100b7
89760894ad89a01e4bd2ff4eff2c391915141250313ab845401d5e2f71c23691d20a0b3c68cbd9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a06b382fcbe48de85615ff6e2dcc0c84021beb4abc527878accd36c9c77af84ba8a06a07d34a6896b270538525cb14b0856ceb442714fa85e4c9ee36dedf638935f9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c080a0ba2586cfb3323fd0f9d7bb38bf9948758a52f156bda66f7100b789760894ad89a01e4bd2ff4eff2c391915141250313ab845401d5e2f71c23691d20a0b3c68cbd9","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec8080c0"},{"input":"0x02f88c82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0f36ff02ab3e90d2de77cdb24423dc39ca5c959429db62cb5c9ed4f0c9e04703aa0476bf841b0602af44039801d4e68648971f63fc2152002b127be6d914d4fc5ca","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff000000000000
00000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84982013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88d82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a08267ae8838a8a5d9c2a761c182b5759184b7672b761278d499c1514fb6e8a495a023aa268f67da7728767e114fdec4d141bf649e0ad931117b5b325834dbf72803","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"0\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84a82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec80a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86282013a8080808094ff000000000000000000000000000000000003ec6480c080a011ec4af7fc663080460b70ae8829f47e9cfa1814c616750d359459cbbba55563a0446e4ec9ea504d13dcbef44238e442caad366dbae1ae9408d39c6d902a5577b0","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec6480c001a0b80bc30bef46b3f824d1460685db875ff070f7798c3148c1fc49c01d6acc550ca0437efe7721563800e6a56ac54877a72c7860cd5e17ef4675afe989822ae87759","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff0000000000000000000000000000
00000003ec6480c0"},{"input":"0x02f86282013a8080808094ff000000000000000000000000000000000003ec6480c080a011ec4af7fc663080460b70ae8829f47e9cfa1814c616750d359459cbbba55563a0446e4ec9ea504d13dcbef44238e442caad366dbae1ae9408d39c6d902a5577b0","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02df82013a8080808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86382013a81c880808094ff000000000000000000000000000000000003ec6480c001a0b80bc30bef46b3f824d1460685db875ff070f7798c3148c1fc49c01d6acc550ca0437efe7721563800e6a56ac54877a72c7860cd5e17ef4675afe989822ae87759","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e082013a81c880808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88682013a8080808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a06ab9d5988105d28dd090e509c8caabaa7773fc08ec5ef3dfeae532e01938ff69a078bca296df26dd2497a49110e138a49a67a6e232a35524b041d04a10fc583651","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84382013a8080808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88782013a81c880808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a031d51b866a02a9966250d312ed6cb4e083f9131ad8f6bb5814074375093d7536a03f8f819c4011dd54
348930b6f98f365de8060b487ada38a62a5617aab6cc6e09","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84482013a81c880808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec6480c001a05bda5ad44c8f9a7516226488cf2d4f53188b40352f35ea7cece8076acda26dbba015373b3b78c88b74c7cca32fd02696a248bb9bea22a09c7a4a17b9e3b629b896","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec6480c080a00d92624cc3335c903077e318204929b4a8c9cd96d94690b0191f8a3bb24e937aa02f1d0315ececf46900154791a732eb8fee9efd0dc998a4e6b892d07ad657a815","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86482013a808082ea608094ff000000000000000000000000000000000003ec6480c001a05bda5ad44c8f9a7516226488cf2d4f53188b40352f35ea7cece8076acda26dbba015373b3b78c88b74c7cca32fd02696a248bb9bea22a09c7a4a17b9e3b629b896","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a808082ea608094f
f000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c88082ea608094ff000000000000000000000000000000000003ec6480c080a00d92624cc3335c903077e318204929b4a8c9cd96d94690b0191f8a3bb24e937aa02f1d0315ececf46900154791a732eb8fee9efd0dc998a4e6b892d07ad657a815","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c88082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88882013a808082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0def168136c0532ec148a9e200e3cc1b22f90c7bbc5d9ef25ac0c5d342e8f3784a022f94642dfc81ba321b3e09879888332fa7c25b623bead7686e3e493c0911b55","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a808082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c88082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0626f43b80260f84cde2c67538c5cfbd328ce85b0f934e8568769e51709b100a7a0283fff5dbfde72b72e2b74c464b1add985d72750be3f4e16ae8ffb4747a40ff2","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c88082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000
000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec6480c080a051b109080002dab4aae47139eb92ddea8951ef5ac6dfc3d7fa07621047dbc680a0334aa47a2888a6cc52b8cf3c3635192b66c692416e954822c1c93c3896ff1ead","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec6480c080a009e179e3bad2da6fb5e205e52fd8d1c462007162aabde5a4d6b052dd4fc4f23ca063922c31438835adf2e4424e2e7d5d2702ec65de2e24a72b491ff0004a53865d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86482013a8082ea60808094ff000000000000000000000000000000000003ec6480c080a051b109080002dab4aae47139eb92ddea8951ef5ac6dfc3d7fa07621047dbc680a0334aa47a2888a6cc52b8cf3c3635192b66c692416e954822c1c93c3896ff1ead","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a8082ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c882ea60808094ff000000000000000000000000000000000003ec6480c080a009e179e3bad2da6fb5e205e52fd8d1c462007162aabde5a4d6b052dd4fc4f23ca063922c31438835adf2e4424e2e7d5d2702ec65de2e24a72b491ff0004a53865d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",
\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c882ea60808094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88882013a8082ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0d3bfebc6597304c6a06491f68d2ac149fc233d28e81af48dd5b1f83e6ff951d2a06668da06d86aba341971dabb58016ca7764cd4b4c1634e3f829dcc8ef8bca4f6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a8082ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c882ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0d45b9fd9a2a3fdf79805cf73b70348037cc69927209a5e3728fe62cbe9543f03a02f5f8477666487ee5148a65ce59f400beac7c208369162b2d555411314d358fb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c882ea60808094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c001a02a6a910f7b5f83fda937006021b9c074f4544d5bb37b9b5a1b7045095f461836a038572b25418528bce7e6a3a480cf9fc90a33d9c63b392c2dbc8faf72a1e4ab8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"dat
a\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c080a07a6dd661b5da27c809cce22aa186c158fe3b07a484a9395fd9a7a31a2b90636fa02b86f82b661264e27c3fda085b59740d3059335bff91693291afcf93c7ca627c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86682013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c001a02a6a910f7b5f83fda937006021b9c074f4544d5bb37b9b5a1b7045095f461836a038572b25418528bce7e6a3a480cf9fc90a33d9c63b392c2dbc8faf72a1e4ab8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c080a07a6dd661b5da27c809cce22aa186c158fe3b07a484a9395fd9a7a31a2b90636fa02b86f82b661264e27c3fda085b59740d3059335bff91693291afcf93c7ca627c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea6082ea608094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88a82013a8082ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a08c13c10490bc20cb1e55dc54ececb37a6c9cc8d013dbe513feacbb0416f09feba045c4e038759a0901820091e043db326
b1bf9a8a1cd046ac72629969497c6a86f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0b904edf8eb9b6beb9cde9e1fae538e12f8d40e9124ace0cba2eee8cbbe77aa10a0788a0bd9a6fb98e7230f5db89be2f5067d1a227ba277b9cb155fb5859c57aae6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea6082ea608094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec6480c080a08d10a7a81c561391fe88bcb2c1dfbf4f7140fb7884fec0558606e76ffc4eaa91a049fa2a95e0f07a4376df9c6f2e1563ad443ce8369d44c6e1ce8ee521805b3623","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a80808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c001a00de6dc2841a25e5ea2dc1e054d69638ec519a9953666930060797cd110cde122a07fd1dcb6319eca7c681cef006efb3f7dcd74ff98a79ce05917d5d1fa7a175b6f","output":"{\"to\":
\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86482013a80808082ea6094ff000000000000000000000000000000000003ec6480c080a08d10a7a81c561391fe88bcb2c1dfbf4f7140fb7884fec0558606e76ffc4eaa91a049fa2a95e0f07a4376df9c6f2e1563ad443ce8369d44c6e1ce8ee521805b3623","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e182013a80808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86582013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c001a00de6dc2841a25e5ea2dc1e054d69638ec519a9953666930060797cd110cde122a07fd1dcb6319eca7c681cef006efb3f7dcd74ff98a79ce05917d5d1fa7a175b6f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e282013a81c8808082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88882013a80808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a04c43dab94dd746973a1f7f051cc520cc01e93e9c6c55147cef34e5fdc0b182a2a06d148cc6ec017f9aeb6442a17d72e388ffc835950e19abd0c06057520f893542","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84582013a80808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000
000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88982013a81c8808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a025b50c1db31c0ae7aaa73374659201b54b71488efecbb6985dc50015abde7e36a04dd8cf68920de7232ab8d1fb28ab94ac05466c1f9d9a3a658f2054fce7868e2c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84682013a81c8808082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c080a0415ad0a93225eaec617206ec835e362d5e75fd0e1903747c1806270ec2684c7da0487ec1479cdb2affa891ff56413818ec169651c906ab932594b6e5bbb79d4998","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0a46ac278c400ef099ad23ac4ccb066a37db8bb5c4d65e0a347152a499ae9eb92a07505f9c67f0897cbe6f848c9a2164c3c234dab2fea7a4dd6f4436be34080e2ff","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86682013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c080a0415ad0a93225eaec617206ec835e362d5e75fd0e1
903747c1806270ec2684c7da0487ec1479cdb2affa891ff56413818ec169651c906ab932594b6e5bbb79d4998","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a808082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0a46ac278c400ef099ad23ac4ccb066a37db8bb5c4d65e0a347152a499ae9eb92a07505f9c67f0897cbe6f848c9a2164c3c234dab2fea7a4dd6f4436be34080e2ff","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88a82013a808082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0a43aba5078d2da3ecc1ec0c67191f8cf58f29f5b4db7f8d4765ea691ddbd4195a0110e568c803db5ea587b406f452cf49ddf6b6f24d41207973d6c785ffaed1454","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a808082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a00caeadf2fcba95f0deab5ee4899348ecac4a18eeb09317d6f8156b891626d219a0549c5376aba320889c2f7b61fd4a51aec5f9a1d9ed9b26cef0a3bee52fac4989","output":"{\"to\":\"0x
ff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec6480c001a07b5568d8a3ec3c7e126f570955db304e31d3f3d7b0c4fd103b6d064a2f6f5e23a030a1b17f299352ae193b8dbce2adda473ccb04e00670f416877762971697606f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86782013a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c080a07bb69d01062f9d6ecb011ad344bbe08d4eca2f6b192dde45015def4c2e6096e0a03a3df52d753e3293d2fd544f72e62ceae00ea6dcab7229685d7b1873d873d203","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86682013a8082ea608082ea6094ff000000000000000000000000000000000003ec6480c001a07b5568d8a3ec3c7e126f570955db304e31d3f3d7b0c4fd103b6d064a2f6f5e23a030a1b17f299352ae193b8dbce2adda473ccb04e00670f416877762971697606f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e382013a8082ea608082ea6094ff0000000000000000000
00000000000000003ec6480c0"},{"input":"0x02f86782013a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c080a07bb69d01062f9d6ecb011ad344bbe08d4eca2f6b192dde45015def4c2e6096e0a03a3df52d753e3293d2fd544f72e62ceae00ea6dcab7229685d7b1873d873d203","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e482013a81c882ea608082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88a82013a8082ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0621255015626b35acf19629ce318999336441537920f9f3ff1bfd44e54d8abd3a03b3426f8fa963debdfa6b44561772bdebc9524c7f63abd0d947b678f5e966502","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84782013a8082ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88b82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0b73c3ba53fc5a0f7fab636cc2b826c3873cda5d0be9dd2100fdceae7899f3310a0491905f676063924cf847fdf2e488be4606ce351748e5c88d49ed50c8d595c94","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84882013a81c882ea608082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f00
0000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0e60702e3f5c5f56e3d1bc2907015ec889d0557ea14e81f137056471fef0fdb9da066e601e6e55c2e37e2042401b352e81841d492d0fe4f05bfe81bba29c9e6ce1f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a085a947fb201d0b50272e7bb7a056adc9ee6f5904634ed91dbde0d650641b7de3a03635c731769302e955d41f794a63262d5d4d37d117c9db89a6b6bce927b71f42","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86882013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a0e60702e3f5c5f56e3d1bc2907015ec889d0557ea14e81f137056471fef0fdb9da066e601e6e55c2e37e2042401b352e81841d492d0fe4f05bfe81bba29c9e6ce1f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e582013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f86982013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c001a085a947fb201d0b50272e7bb7a056adc9ee6f5904634ed91dbde0d650641b7de3a03635c731769302e955d41f794a63262d5d4d37d117c9db89a6b6bce927b71f42","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\"
:\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e682013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec6480c0"},{"input":"0x02f88c82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0d67e28d31489af5129c4832af814a01e0baa5e5ba6245fe2d3304693ceea48e0a03bc06f1c6dd01a14826c67aa35258c0bbf7c516a9bb21e9190eaa8d3768f49bb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84982013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88d82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0a5368984aca4bc1e3d7ebc7ae4ead5e09ffd3b4b4712d039c19fdac948e5952ea065953ace0a29210440d6a0f05d6b43f482950b463b3be6b23fc63452c94b9446","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"100\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84a82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec64a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86a82013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a086da25ab078729b08cf48da02eb1c1e05fe0f4e5d7b332262b68f4db3dc9b72fa04102c03c7d9f11a6fdb77d6a36d3f07e09b1ceaab0bf4ef1fdc604bcd726f83
b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e782013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86b82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0cde92f395919b3205b4260867b11597f9ecf363bc1be9bbd8b5400d3381d64b3a01b9555cfa22ee8615c3033235ebad605d0bef616d08876de26719866fcc4d41e","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e882013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86a82013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a086da25ab078729b08cf48da02eb1c1e05fe0f4e5d7b332262b68f4db3dc9b72fa04102c03c7d9f11a6fdb77d6a36d3f07e09b1ceaab0bf4ef1fdc604bcd726f83b","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e782013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86b82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0cde92f395919b3205b4260867b11597f9ecf363bc1be9bbd8b5400d3381d64b3a01b9555cfa22ee8615c3033235ebad605d0bef616d08876de26719866fcc4d41e","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02e882013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"inp
ut":"0x02f88e82013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a03dd64e48a1ae228665b3f180367997ee96bc60ee226615c900e3d86634044328a00f6cdb24633e75fa65f6b93fce9b084c1f30dd03dde97d01f25c6f10f34d5d9d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84b82013a8080808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f88f82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a07475efeb8dd5bf4ba7efb31ab67a9077401ed71f4e8dd13e7058ce5cfeb5a0f2a01046e93a5258bf320bc392173a49b6fef15976be4c1210f2e367af223ad8c026","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84c82013a81c880808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86c82013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0ca84441c7ba097a7afa5ef9ad7ef70ba58ddfffc06c5d015b5c8553f1632d103a057fee6d92055c9c031a1efa667f3ee554804c4f34a195b6dfc781e1592c20444","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainI
d\":314}","nosigTx":"0x02e982013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a04055dfcd6e0b7264d3474ba13f76659384e5f365ebc6ba271641481b12bf410ca01ef7d04dc33fdf0c3137e31d8c822ad68bbd4f89ada52db9705bb66813d11583","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86c82013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0ca84441c7ba097a7afa5ef9ad7ef70ba58ddfffc06c5d015b5c8553f1632d103a057fee6d92055c9c031a1efa667f3ee554804c4f34a195b6dfc781e1592c20444","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a04055dfcd6e0b7264d3474ba13f76659384e5f365ebc6ba271641481b12bf410ca01ef7d04dc33fdf0c3137e31d8c822ad68bbd4f89ada52db9705bb66813d11583","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89082013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a020802
12bb64a798e1e138e4991ab830cf04d37ffeedf6fde7eba0eb7d972b350a02aff43f9e5ca8d6cea6e918391188fa37bdb91b864eadec705f7c69c4a61bc5a","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84d82013a808082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89182013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0e41c052d72950a563b8ed7fb15855beabea43ff5b038bd6a3ccc6416e3498619a0568bbd7cbff31a47e1d0b9712f382c52e74b7b28cbcb8458974d82a8d54ddc57","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84e82013a81c88082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86c82013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a057c342304f133ff8832d3d16a43571afe905dc9b10afc24c6e99225cca6d8817a00e2155d1904751ce0d2ba01e6475aeae254c02966773f5bc7650e37252a01a92","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c882ea60808094
ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0fc2a550a7798085cae28028abbe4829be29e5f3a40af221086831d0e17ca3c83a01ce21f5934b9ca566958e09e89c99fd9ed2dc4acae209a6fb81fd3a6c9879a99","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86c82013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a057c342304f133ff8832d3d16a43571afe905dc9b10afc24c6e99225cca6d8817a00e2155d1904751ce0d2ba01e6475aeae254c02966773f5bc7650e37252a01a92","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0fc2a550a7798085cae28028abbe4829be29e5f3a40af221086831d0e17ca3c83a01ce21f5934b9ca566958e09e89c99fd9ed2dc4acae209a6fb81fd3a6c9879a99","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89082013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0fa33b63666310ca1c72fc5d82639c5b8e2a7638910be7bee23ada9f139c6b891a02012cad8e991beea7dcf0b6e9346b0228699698e183e2fadfc5b9b880601af9b","output":"{\"to\":\"0xff0
00000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84d82013a8082ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89182013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0bc6ae4e92e7a20d5ff61258653dffda636cee0fd97dd156eac7a1f231f1f2785a0323055e0e0bed496b3fec30be292338d0956ecf8baeeb34458230821589aa7fb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84e82013a81c882ea60808094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86e82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0bd2889395392859a83a33bfe549c09d172e1f289de29d4bc9d0a3d25ea8aa71ba075fe92140a08d8e680061852438623c9cd10e211955577d1a3b56e49e960e4e7","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a05553c929ae32692a9f742371ffcfc8c8d2b77f31a7795460297cb78c29e357e8a043e4
2ca4ed7eb1b8e3546de2364522735d79a2e2ff5d16f7f96d165c5815c80c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86e82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0bd2889395392859a83a33bfe549c09d172e1f289de29d4bc9d0a3d25ea8aa71ba075fe92140a08d8e680061852438623c9cd10e211955577d1a3b56e49e960e4e7","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a05553c929ae32692a9f742371ffcfc8c8d2b77f31a7795460297cb78c29e357e8a043e42ca4ed7eb1b8e3546de2364522735d79a2e2ff5d16f7f96d165c5815c80c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89282013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a055f63a6bef8e23dc437ff4ac9349a59fcde2f72d1879de50b0d3686ff648749da04cf8034df06cf6f15f31bb55979b40eeacbd28fb1d745e608acdc088e22beb66","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePer
Gas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84f82013a8082ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89382013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c4a0253448dad999692c1bf3cfb5de9e95a2e96da4e1f64133ada452a825fe9aa0757b576ceb7a2c494819960ac59e9d3a4e3da384f23c0e88ada758dc265eae94","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":0,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85082013a81c882ea6082ea608094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86c82013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a02632c4d8a443afb8d39f91d036fd4915ca3ad2f253b8f93211b4b3ee15566519a009bdc00c8eaaf22f3d7d04b53dbc777fd027a780fb4ddaf01002724ddf2879dd","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a08bda02c15ca37d35d9ad2e2f7731d24dd039f5c6c6f7eaad739daadac6db33e5a044c01e493e10929e4021c69d9df886b211eb349a865df9f0796846ad1cdf23e8","output":"{\"to\":\"0xff0000000000
00000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86c82013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a02632c4d8a443afb8d39f91d036fd4915ca3ad2f253b8f93211b4b3ee15566519a009bdc00c8eaaf22f3d7d04b53dbc777fd027a780fb4ddaf01002724ddf2879dd","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02e982013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86d82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a08bda02c15ca37d35d9ad2e2f7731d24dd039f5c6c6f7eaad739daadac6db33e5a044c01e493e10929e4021c69d9df886b211eb349a865df9f0796846ad1cdf23e8","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ea82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89082013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0ed0db75f41b2b8b89768ce5ad08716aff149dc1d5a2e593140d8964eb2da3229a02e5248cca9b5af340d73271cad4d690f7efa11c9278824aca528eb15d28aec4d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064
\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84d82013a80808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89182013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a07108fbbabc45826dbdc8e4cf831240fb39ead7bd4b8ec5d8de64d04e2885e554a04dae4fb4bdbabb9d8f923d579e75ee980da1b4fac5773ec68f395af240f037f0","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84e82013a81c8808082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86e82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0130b6723050095faa2e7abc69c2f785e73d333c65fae6cf2835518f970c627d5a00b90bd4f2ded1da0163ab5e81ad76d51aef005d663137347fc550313e1c8b6fc","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0993a50431e82d10d632466d45f8aaffea9a56efa59d529dfd497d3c2a06aabeba0070d3132c6ce1e4ff70b0721d1f4c03ab566b8e2af29d33148033fb3009dc29d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":
\"0\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86e82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0130b6723050095faa2e7abc69c2f785e73d333c65fae6cf2835518f970c627d5a00b90bd4f2ded1da0163ab5e81ad76d51aef005d663137347fc550313e1c8b6fc","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0993a50431e82d10d632466d45f8aaffea9a56efa59d529dfd497d3c2a06aabeba0070d3132c6ce1e4ff70b0721d1f4c03ab566b8e2af29d33148033fb3009dc29d","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89282013a808082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a09c9d3b0d7b58bfe81a6881b9db184e0ade03c1ad11aa8f1566e2f24f50f85525a06c10cf91f4dbc24d0f78ef09a8e2310d349a034cec7e86e807d7a48ea26161e1","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84f82013a808082ea6082ea6094ff0000000
00000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89382013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a0f8423b51e513618c6a4bdd2696479d91c760e11ea24657dd27fa6eb9b7da8c0ea07e9456113fb034718d1b4f4e09ade1ce78251a8c86f298b152850bc5925156cb","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"0\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85082013a81c88082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f86e82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0d09b373d45c1bfc1c5d9b5198e69f974d4df456245e2f7a5edd486f3dd2795a9a011396197a670e7b0c4613b7ebf8aee53382930c7bd25c35dda15acae78ec0e2c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0131f5af3ece9a0b723d0c812dbcfc6cb458acf5e0846cc506215fc04d6af66d5a078d0bf7a40cc1ddcebbc4e86fb9a04bfc94f3da94b4a74476883b7b1729f8a44","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec
82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86e82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0d09b373d45c1bfc1c5d9b5198e69f974d4df456245e2f7a5edd486f3dd2795a9a011396197a670e7b0c4613b7ebf8aee53382930c7bd25c35dda15acae78ec0e2c","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02eb82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f86f82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c080a0131f5af3ece9a0b723d0c812dbcfc6cb458acf5e0846cc506215fc04d6af66d5a078d0bf7a40cc1ddcebbc4e86fb9a04bfc94f3da94b4a74476883b7b1729f8a44","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ec82013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89282013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c286f4ee350eab70273cf9a952537534446a0f39e9bfea7340eabc04396a0e3da01e1302ae987a69836ec2c9266e6fe623db5fcdc566e37084c0c57630c4de8ee6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f84f82013a8082ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000
000ff00000000000000000000000000000000000064c0"},{"input":"0x02f89382013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c080a09dee3fa88e365133a18035618af718a045e1a957f10f50c632f23923fd337b9ba06bbbd59489849803f8c61138932ac1a8361edb4c80789d030542829c0a2b5b7f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"0\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85082013a81c882ea608082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"},{"input":"0x02f87082013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0c1cb1e2b41e48fecd59d72039147c76993653f061f9ea156b53c377673eef7f1a01822506f755206b60209a12ed3c84446f4fcb4ad602fa7ab7ee4ff2acde19ed6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02ed82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f87182013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a09817043ad22797d2f26ca46697db5f586c38336a171dce2d22d659889e9e9eb5a0369a5d6169586d9c831b6e017aa29fd49eac0636a136bfa5bafb95390fa95b8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":null,\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ee82013a81c882ea6082ea6082ea6094ff0000000000000000000000000000
00000003ec880de0b6b3a764000080c0"},{"input":"0x02f87082013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a0c1cb1e2b41e48fecd59d72039147c76993653f061f9ea156b53c377673eef7f1a01822506f755206b60209a12ed3c84446f4fcb4ad602fa7ab7ee4ff2acde19ed6","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02ed82013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f87182013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c001a09817043ad22797d2f26ca46697db5f586c38336a171dce2d22d659889e9e9eb5a0369a5d6169586d9c831b6e017aa29fd49eac0636a136bfa5bafb95390fa95b8f","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02ee82013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a764000080c0"},{"input":"0x02f89482013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a039357ad40087d17551ca2b94723f0394185a993671db02172a7de70c24054852a046c84070dfadd244b358690e5b89c75f3988b21b6614e6e3af2f8ca302d6c42a","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":0,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85182013a8082ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff000000000000000
00000000000000000000064c0"},{"input":"0x02f89582013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c001a0c991c81705a4c53a9255e72beb8243638c68f10c63b082755972bbbe15245d12a014f6852ae34c92882559e6810d4372109930a23b522368fdef2c85ce04e27839","output":"{\"to\":\"0xff000000000000000000000000000000000003EC\",\"value\":\"1000000000000000000\",\"gasLimit\":60000,\"maxFeePerGas\":\"60000\",\"maxPriorityFeePerGas\":\"60000\",\"data\":\"0xf8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064\",\"nonce\":200,\"type\":2,\"chainId\":314}","nosigTx":"0x02f85282013a81c882ea6082ea6082ea6094ff000000000000000000000000000000000003ec880de0b6b3a7640000a4f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064c0"}]` + to, err := address.NewIDAddress(1) + require.NoError(t, err) - testcases := []struct { - Input EthBytes `json:"input"` - Output string `json:"output"` - NosigTx string `json:"nosigTx"` - }{} + toEth, err := EthAddressFromFilecoinAddress(to) + require.NoError(t, err) - err := json.Unmarshal([]byte(tcstr), &testcases) - if err != nil { - return nil, err + tcs := map[string]struct { + msg *types.SignedMessage + expectedErr string + validateFunc func(t *testing.T, smsg *types.SignedMessage, tx EthTransaction) + }{ + "empty": { + expectedErr: "signed message is nil", + }, + "invalid-signature": { + msg: &types.SignedMessage{ + Message: types.Message{ + To: builtintypes.EthereumAddressManagerActorAddr, + From: from, + Method: builtintypes.MethodsEAM.CreateExternal, + }, + Signature: crypto.Signature{ + Type: crypto.SigTypeDelegated, + Data: []byte{1}, + }, + }, + expectedErr: "unsupported signature length", + }, + "valid-eip1559": { + msg: &types.SignedMessage{ + Message: types.Message{ + From: from, + To: to, + Value: big.NewInt(10), + GasFeeCap: big.NewInt(11), + GasPremium: big.NewInt(12), + GasLimit: 13, + Nonce: 14, + Method: 
builtintypes.MethodsEVM.InvokeContract, + }, + Signature: crypto.Signature{ + Type: crypto.SigTypeDelegated, + Data: eip1559sig, + }, + }, + validateFunc: func(t *testing.T, smsg *types.SignedMessage, tx EthTransaction) { + eip1559tx := tx.(*Eth1559TxArgs) + require.Equal(t, big.NewInt(10), eip1559tx.Value) + require.Equal(t, big.NewInt(11), eip1559tx.MaxFeePerGas) + require.Equal(t, big.NewInt(12), eip1559tx.MaxPriorityFeePerGas) + require.EqualValues(t, uint64(13), eip1559tx.GasLimit) + require.EqualValues(t, uint64(14), eip1559tx.Nonce) + require.EqualValues(t, toEth, *eip1559tx.To) + require.EqualValues(t, 314, eip1559tx.ChainID) + require.Empty(t, eip1559tx.Input) + + ethTx, err := tx.ToEthTx(smsg) + require.NoError(t, err) + require.EqualValues(t, 314, ethTx.ChainID) + require.EqualValues(t, 14, ethTx.Nonce) + hash, err := eip1559tx.TxHash() + require.NoError(t, err) + require.EqualValues(t, hash, ethTx.Hash) + require.EqualValues(t, fromEth, ethTx.From) + require.EqualValues(t, toEth, *ethTx.To) + require.EqualValues(t, big.NewInt(10), ethTx.Value) + require.EqualValues(t, 13, ethTx.Gas) + require.EqualValues(t, big.NewInt(11), *ethTx.MaxFeePerGas) + require.EqualValues(t, big.NewInt(12), *ethTx.MaxPriorityFeePerGas) + require.Nil(t, ethTx.GasPrice) + require.Empty(t, ethTx.AccessList) + }, + }, + "valid-legacy": { + msg: &types.SignedMessage{ + Message: types.Message{ + From: from, + To: to, + Value: big.NewInt(10), + GasFeeCap: big.NewInt(11), + GasPremium: big.NewInt(12), + GasLimit: 13, + Nonce: 14, + Method: builtintypes.MethodsEVM.InvokeContract, + }, + Signature: crypto.Signature{ + Type: crypto.SigTypeDelegated, + Data: legacySig, + }, + }, + validateFunc: func(t *testing.T, smsg *types.SignedMessage, tx EthTransaction) { + legacyTx := tx.(*EthLegacyHomesteadTxArgs) + require.Equal(t, big.NewInt(10), legacyTx.Value) + require.EqualValues(t, uint64(13), legacyTx.GasLimit) + require.EqualValues(t, uint64(14), legacyTx.Nonce) + require.EqualValues(t, 
toEth, *legacyTx.To) + require.EqualValues(t, big.NewInt(11), legacyTx.GasPrice) + require.Empty(t, legacyTx.Input) + + ethTx, err := tx.ToEthTx(smsg) + require.NoError(t, err) + require.EqualValues(t, 0, ethTx.ChainID) + require.EqualValues(t, 14, ethTx.Nonce) + hash, err := legacyTx.TxHash() + require.NoError(t, err) + require.EqualValues(t, big.NewInt(11), *ethTx.GasPrice) + require.EqualValues(t, hash, ethTx.Hash) + require.EqualValues(t, fromEth, ethTx.From) + require.EqualValues(t, toEth, *ethTx.To) + require.EqualValues(t, big.NewInt(10), ethTx.Value) + require.EqualValues(t, 13, ethTx.Gas) + require.Nil(t, ethTx.MaxFeePerGas) + require.Nil(t, ethTx.MaxPriorityFeePerGas) + require.Empty(t, ethTx.AccessList) + require.EqualValues(t, big.NewInt(27), ethTx.V) + }, + }, } - res := []TxTestcase{} - for _, tc := range testcases { - tx := EthTxArgs{} - err := json.Unmarshal([]byte(tc.Output), &tx) - if err != nil { - return nil, err - } - res = append(res, TxTestcase{ - Input: tc.Input, - Output: tx, - TxJSON: tc.Output, - NosigTx: tc.NosigTx, + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + tx, err := EthTransactionFromSignedFilecoinMessage(tc.msg) + if tc.expectedErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedErr) + } else { + require.NoError(t, err) + } + if tc.validateFunc != nil { + tc.validateFunc(t, tc.msg, tx) + } }) } - - return res, err } diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index 893c0721c85..3c2b9bec031 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -1011,6 +1011,14 @@ type EthTraceReplayBlockTransaction struct { VmTrace *string `json:"vmTrace"` } +type EthTraceTransaction struct { + *EthTrace + BlockHash EthHash `json:"blockHash"` + BlockNumber int64 `json:"blockNumber"` + TransactionHash EthHash `json:"transactionHash"` + TransactionPosition int `json:"transactionPosition"` +} + type EthCallTraceAction struct { 
CallType string `json:"callType"` From EthAddress `json:"from"` diff --git a/chain/types/ethtypes/rlp_test.go b/chain/types/ethtypes/rlp_test.go index 0ce6e15d926..0c74cf58c0a 100644 --- a/chain/types/ethtypes/rlp_test.go +++ b/chain/types/ethtypes/rlp_test.go @@ -192,7 +192,7 @@ func TestDecodeError(t *testing.T) { func TestDecode1(t *testing.T) { b := mustDecodeHex("0x02f8758401df5e7680832c8411832c8411830767f89452963ef50e27e06d72d59fcb4f3c2a687be3cfef880de0b6b3a764000080c080a094b11866f453ad85a980e0e8a2fc98cbaeb4409618c7734a7e12ae2f66fd405da042dbfb1b37af102023830ceeee0e703ffba0b8b3afeb8fe59f405eca9ed61072") - decoded, err := ParseEthTxArgs(b) + decoded, err := parseEip1559Tx(b) require.NoError(t, err) sender, err := decoded.Sender() diff --git a/chain/types/fil.go b/chain/types/fil.go index 960a42f2879..227e9442fbd 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -68,7 +68,7 @@ func (f FIL) Nano() string { func (f FIL) Format(s fmt.State, ch rune) { switch ch { case 's', 'v': - fmt.Fprint(s, f.String()) + _, _ = fmt.Fprint(s, f.String()) default: f.Int.Format(s, ch) } diff --git a/cli/clicommands/cmd.go b/cli/clicommands/cmd.go index a37ce329acc..791a11927d7 100644 --- a/cli/clicommands/cmd.go +++ b/cli/clicommands/cmd.go @@ -10,7 +10,6 @@ var Commands = []*cli.Command{ lcli.WithCategory("basic", lcli.SendCmd), lcli.WithCategory("basic", lcli.WalletCmd), lcli.WithCategory("basic", lcli.InfoCmd), - lcli.WithCategory("basic", lcli.ClientCmd), lcli.WithCategory("basic", lcli.MultisigCmd), lcli.WithCategory("basic", lcli.FilplusCmd), lcli.WithCategory("basic", lcli.PaychCmd), diff --git a/cli/client.go b/cli/client.go deleted file mode 100644 index e40a6686679..00000000000 --- a/cli/client.go +++ /dev/null @@ -1,2468 +0,0 @@ -package cli - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "math/rand" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" - - tm 
"github.com/buger/goterm" - "github.com/chzyer/readline" - "github.com/docker/go-units" - "github.com/fatih/color" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-cidutil/cidenc" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multibase" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/tablewriter" - "github.com/filecoin-project/lotus/node/repo/imports" -) - -var CidBaseFlag = cli.StringFlag{ - Name: "cid-base", - Hidden: true, - Value: "base32", - Usage: "Multibase encoding used for version 1 CIDs in output.", - DefaultText: "base32", -} - -// GetCidEncoder returns an encoder using the `cid-base` flag if provided, or -// the default (Base32) encoder if not. 
-func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) { - val := cctx.String("cid-base") - - e := cidenc.Encoder{Base: multibase.MustNewEncoder(multibase.Base32)} - - if val != "" { - var err error - e.Base, err = multibase.EncoderByName(val) - if err != nil { - return e, err - } - } - - return e, nil -} - -var ClientCmd = &cli.Command{ - Name: "client", - Usage: "Make deals, store data, retrieve data", - Subcommands: []*cli.Command{ - WithCategory("storage", clientDealCmd), - WithCategory("storage", clientQueryAskCmd), - WithCategory("storage", clientListDeals), - WithCategory("storage", clientGetDealCmd), - WithCategory("storage", clientListAsksCmd), - WithCategory("storage", clientDealStatsCmd), - WithCategory("storage", clientInspectDealCmd), - WithCategory("data", clientImportCmd), - WithCategory("data", clientDropCmd), - WithCategory("data", clientLocalCmd), - WithCategory("data", clientStat), - WithCategory("retrieval", clientFindCmd), - WithCategory("retrieval", clientQueryRetrievalAskCmd), - WithCategory("retrieval", clientRetrieveCmd), - WithCategory("retrieval", clientRetrieveCatCmd), - WithCategory("retrieval", clientRetrieveLsCmd), - WithCategory("retrieval", clientCancelRetrievalDealCmd), - WithCategory("retrieval", clientListRetrievalsCmd), - WithCategory("util", clientCommPCmd), - WithCategory("util", clientCarGenCmd), - WithCategory("util", clientBalancesCmd), - WithCategory("util", clientListTransfers), - WithCategory("util", clientRestartTransfer), - WithCategory("util", clientCancelTransfer), - }, -} - -var clientImportCmd = &cli.Command{ - Name: "import", - Usage: "Import data", - ArgsUsage: "[inputPath]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "car", - Usage: "import from a car file instead of a regular file", - }, - &cli.BoolFlag{ - Name: "quiet", - Aliases: []string{"q"}, - Usage: "Output root CID only", - }, - &CidBaseFlag, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != 
nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - absPath, err := filepath.Abs(cctx.Args().First()) - if err != nil { - return err - } - - ref := lapi.FileRef{ - Path: absPath, - IsCAR: cctx.Bool("car"), - } - c, err := api.ClientImport(ctx, ref) - if err != nil { - return err - } - - encoder, err := GetCidEncoder(cctx) - if err != nil { - return err - } - - if !cctx.Bool("quiet") { - fmt.Printf("Import %d, Root ", c.ImportID) - } - fmt.Println(encoder.Encode(c.Root)) - - return nil - }, -} - -var clientDropCmd = &cli.Command{ - Name: "drop", - Usage: "Remove import", - ArgsUsage: "[import ID...]", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - var ids []uint64 - for i, s := range cctx.Args().Slice() { - id, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return xerrors.Errorf("parsing %d-th import ID: %w", i, err) - } - - ids = append(ids, id) - } - - for _, id := range ids { - if err := api.ClientRemoveImport(ctx, imports.ID(id)); err != nil { - return xerrors.Errorf("removing import %d: %w", id, err) - } - } - - return nil - }, -} - -var clientCommPCmd = &cli.Command{ - Name: "commP", - Usage: "Calculate the piece-cid (commP) of a CAR file", - ArgsUsage: "[inputFile]", - Flags: []cli.Flag{ - &CidBaseFlag, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - ret, err := api.ClientCalcCommP(ctx, cctx.Args().Get(0)) - if err != nil { - return err - } - - encoder, err := GetCidEncoder(cctx) - if err != nil { - return err - } - - fmt.Println("CID: ", encoder.Encode(ret.Root)) - fmt.Println("Piece size: ", 
types.SizeStr(types.NewInt(uint64(ret.Size)))) - fmt.Println("Piece size in bytes: ", types.NewInt(uint64(ret.Size))) - return nil - }, -} - -var clientCarGenCmd = &cli.Command{ - Name: "generate-car", - Usage: "Generate a car file from input", - ArgsUsage: "[inputPath outputPath]", - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - if cctx.NArg() != 2 { - return IncorrectNumArgs(cctx) - } - - ref := lapi.FileRef{ - Path: cctx.Args().First(), - IsCAR: false, - } - - op := cctx.Args().Get(1) - - if err = api.ClientGenCar(ctx, ref, op); err != nil { - return err - } - return nil - }, -} - -var clientLocalCmd = &cli.Command{ - Name: "local", - Usage: "List locally imported data", - Flags: []cli.Flag{ - &CidBaseFlag, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - list, err := api.ClientListImports(ctx) - if err != nil { - return err - } - - encoder, err := GetCidEncoder(cctx) - if err != nil { - return err - } - - sort.Slice(list, func(i, j int) bool { - return list[i].Key < list[j].Key - }) - - for _, v := range list { - cidStr := "" - if v.Root != nil { - cidStr = encoder.Encode(*v.Root) - } - - fmt.Printf("%d: %s @%s (%s)\n", v.Key, cidStr, v.FilePath, v.Source) - if v.Err != "" { - fmt.Printf("\terror: %s\n", v.Err) - } - } - return nil - }, -} - -var clientDealCmd = &cli.Command{ - Name: "deal", - Usage: "Initialize storage deal with a miner", - Description: `Make a deal with a miner. -dataCid comes from running 'lotus client import'. -miner is the address of the miner you wish to make a deal with. -price is measured in FIL/Epoch. Miners usually don't accept a bid -lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miners listed price -with 'lotus client query-ask '. 
-duration is how long the miner should store the data for, in blocks. -The minimum value is 518400 (6 months).`, - ArgsUsage: "[dataCid miner price duration]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "manual-piece-cid", - Usage: "manually specify piece commitment for data (dataCid must be to a car file)", - }, - &cli.Int64Flag{ - Name: "manual-piece-size", - Usage: "if manually specifying piece cid, used to specify size (dataCid must be to a car file)", - }, - &cli.BoolFlag{ - Name: "manual-stateless-deal", - Usage: "instructs the node to send an offline deal without registering it with the deallist/fsm", - }, - &cli.StringFlag{ - Name: "from", - Usage: "specify address to fund the deal with", - }, - &cli.Int64Flag{ - Name: "start-epoch", - Usage: "specify the epoch that the deal should start at", - Value: -1, - }, - &cli.BoolFlag{ - Name: "fast-retrieval", - Usage: "indicates that data should be available for fast retrieval", - Value: true, - }, - &cli.BoolFlag{ - Name: "verified-deal", - Usage: "indicate that the deal counts towards verified client total", - DefaultText: "true if client is verified, false otherwise", - }, - &cli.StringFlag{ - Name: "provider-collateral", - Usage: "specify the requested provider collateral the miner should put up", - }, - &CidBaseFlag, - }, - Action: func(cctx *cli.Context) error { - - expectedArgsMsg := "expected 4 args: dataCid, miner, price, duration" - - if !cctx.Args().Present() { - if cctx.Bool("manual-stateless-deal") { - return xerrors.New("--manual-stateless-deal can not be combined with interactive deal mode: you must specify the " + expectedArgsMsg) - } - return interactiveDeal(cctx) - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - afmt := NewAppFmt(cctx.App) - - if cctx.NArg() != 4 { - return IncorrectNumArgs(cctx) - } - - // [data, miner, price, dur] - - data, err := cid.Parse(cctx.Args().Get(0)) - if err != nil { - return err - } 
- - miner, err := address.NewFromString(cctx.Args().Get(1)) - if err != nil { - return err - } - - price, err := types.ParseFIL(cctx.Args().Get(2)) - if err != nil { - return err - } - - dur, err := strconv.ParseInt(cctx.Args().Get(3), 10, 32) - if err != nil { - return err - } - - var provCol big.Int - if pcs := cctx.String("provider-collateral"); pcs != "" { - pc, err := big.FromString(pcs) - if err != nil { - return fmt.Errorf("failed to parse provider-collateral: %w", err) - } - provCol = pc - } - - if abi.ChainEpoch(dur) < build.MinDealDuration { - return xerrors.Errorf("minimum deal duration is %d blocks", build.MinDealDuration) - } - if abi.ChainEpoch(dur) > build.MaxDealDuration { - return xerrors.Errorf("maximum deal duration is %d blocks", build.MaxDealDuration) - } - - var a address.Address - if from := cctx.String("from"); from != "" { - faddr, err := address.NewFromString(from) - if err != nil { - return xerrors.Errorf("failed to parse 'from' address: %w", err) - } - a = faddr - } else { - def, err := api.WalletDefaultAddress(ctx) - if err != nil { - return err - } - a = def - } - - ref := &storagemarket.DataRef{ - TransferType: storagemarket.TTGraphsync, - Root: data, - } - - if mpc := cctx.String("manual-piece-cid"); mpc != "" { - c, err := cid.Parse(mpc) - if err != nil { - return xerrors.Errorf("failed to parse provided manual piece cid: %w", err) - } - - ref.PieceCid = &c - - psize := cctx.Int64("manual-piece-size") - if psize == 0 { - return xerrors.Errorf("must specify piece size when manually setting cid") - } - - ref.PieceSize = abi.UnpaddedPieceSize(psize) - - ref.TransferType = storagemarket.TTManual - } - - // Check if the address is a verified client - dcap, err := api.StateVerifiedClientStatus(ctx, a, types.EmptyTSK) - if err != nil { - return err - } - - isVerified := dcap != nil - - // If the user has explicitly set the --verified-deal flag - if cctx.IsSet("verified-deal") { - // If --verified-deal is true, but the address is not a 
verified - // client, return an error - verifiedDealParam := cctx.Bool("verified-deal") - if verifiedDealParam && !isVerified { - return xerrors.Errorf("address %s does not have verified client status", a) - } - - // Override the default - isVerified = verifiedDealParam - } - - sdParams := &lapi.StartDealParams{ - Data: ref, - Wallet: a, - Miner: miner, - EpochPrice: types.BigInt(price), - MinBlocksDuration: uint64(dur), - DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")), - FastRetrieval: cctx.Bool("fast-retrieval"), - VerifiedDeal: isVerified, - ProviderCollateral: provCol, - } - - var proposal *cid.Cid - if cctx.Bool("manual-stateless-deal") { - if ref.TransferType != storagemarket.TTManual || price.Int64() != 0 { - return xerrors.New("when manual-stateless-deal is enabled, you must also provide a 'price' of 0 and specify 'manual-piece-cid' and 'manual-piece-size'") - } - proposal, err = api.ClientStatelessDeal(ctx, sdParams) - } else { - proposal, err = api.ClientStartDeal(ctx, sdParams) - } - - if err != nil { - return err - } - - encoder, err := GetCidEncoder(cctx) - if err != nil { - return err - } - - afmt.Println(encoder.Encode(*proposal)) - - return nil - }, -} - -func interactiveDeal(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPIV1(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - afmt := NewAppFmt(cctx.App) - - state := "import" - gib := types.NewInt(1 << 30) - - var data cid.Cid - var days int - var maddrs []address.Address - var ask []storagemarket.StorageAsk - var epochPrices []big.Int - var dur time.Duration - var epochs abi.ChainEpoch - var verified bool - var ds lapi.DataCIDSize - - // find - var candidateAsks []QueriedAsk - var budget types.FIL - var dealCount int64 - var medianPing, maxAcceptablePing time.Duration - - var a address.Address - if from := cctx.String("from"); from != "" { - faddr, err := address.NewFromString(from) 
- if err != nil { - return xerrors.Errorf("failed to parse 'from' address: %w", err) - } - a = faddr - } else { - def, err := api.WalletDefaultAddress(ctx) - if err != nil { - return err - } - a = def - } - - if _, err := api.StateGetActor(ctx, a, types.EmptyTSK); err != nil { - return xerrors.Errorf("address not initialized on chain: %w", err) - } - - fromBal, err := api.WalletBalance(ctx, a) - if err != nil { - return xerrors.Errorf("checking from address balance: %w", err) - } - - printErr := func(err error) { - afmt.Printf("%s %s\n", color.RedString("Error:"), err.Error()) - } - - cs := readline.NewCancelableStdin(afmt.Stdin) - go func() { - <-ctx.Done() - _ = cs.Close() - }() - - rl := bufio.NewReader(cs) - -uiLoop: - for { - // TODO: better exit handling - if err := ctx.Err(); err != nil { - return err - } - - switch state { - case "import": - afmt.Print("Data CID (from " + color.YellowString("lotus client import") + "): ") - - _cidStr, _, err := rl.ReadLine() - cidStr := string(_cidStr) - if err != nil { - printErr(xerrors.Errorf("reading cid string: %w", err)) - continue - } - - data, err = cid.Parse(cidStr) - if err != nil { - printErr(xerrors.Errorf("parsing cid string: %w", err)) - continue - } - - color.Blue(".. 
calculating data size\n") - ds, err = api.ClientDealPieceCID(ctx, data) - if err != nil { - return err - } - - state = "duration" - case "duration": - afmt.Print("Deal duration (days): ") - - _daystr, _, err := rl.ReadLine() - daystr := string(_daystr) - if err != nil { - return err - } - - _, err = fmt.Sscan(daystr, &days) - if err != nil { - printErr(xerrors.Errorf("parsing duration: %w", err)) - continue - } - - minDealDurationDays := uint64(build.MinDealDuration) / (builtin.SecondsInDay / build.BlockDelaySecs) - if days < int(minDealDurationDays) { - printErr(xerrors.Errorf("minimum duration is %d days, got %d", minDealDurationDays, days)) - continue - } - - maxDealDurationDays := uint64(build.MaxDealDuration) / (builtin.SecondsInDay / build.BlockDelaySecs) - if days > int(maxDealDurationDays) { - printErr(xerrors.Errorf("maximum duration is %d days, got %d", maxDealDurationDays, days)) - continue - } - - dur = 24 * time.Hour * time.Duration(days) - epochs = abi.ChainEpoch(dur / (time.Duration(build.BlockDelaySecs) * time.Second)) - - state = "verified" - case "verified": - ts, err := api.ChainHead(ctx) - if err != nil { - return err - } - - dcap, err := api.StateVerifiedClientStatus(ctx, a, ts.Key()) - if err != nil { - return err - } - - if dcap == nil { - state = "miner" - continue - } - - if dcap.Uint64() < uint64(ds.PieceSize) { - color.Yellow(".. not enough DataCap available for a verified deal\n") - state = "miner" - continue - } - - afmt.Print("\nMake this a verified deal? (yes/no): ") - - _yn, _, err := rl.ReadLine() - yn := string(_yn) - if err != nil { - return err - } - - switch yn { - case "yes": - verified = true - case "no": - verified = false - default: - afmt.Println("Type in full 'yes' or 'no'") - continue - } - - state = "miner" - case "miner": - maddrs = maddrs[:0] - ask = ask[:0] - afmt.Print("Miner Addresses (f0.. 
f0..), none to find: ") - - _maddrsStr, _, err := rl.ReadLine() - maddrsStr := string(_maddrsStr) - if err != nil { - printErr(xerrors.Errorf("reading miner address: %w", err)) - continue - } - - for _, s := range strings.Fields(maddrsStr) { - maddr, err := address.NewFromString(strings.TrimSpace(s)) - if err != nil { - printErr(xerrors.Errorf("parsing miner address: %w", err)) - continue uiLoop - } - - maddrs = append(maddrs, maddr) - } - - state = "query" - if len(maddrs) == 0 { - state = "find" - } - case "find": - asks, err := GetAsks(ctx, api) - if err != nil { - return err - } - - if len(asks) == 0 { - printErr(xerrors.Errorf("no asks found")) - continue uiLoop - } - - medianPing = asks[len(asks)/2].Ping - var avgPing time.Duration - for _, ask := range asks { - avgPing += ask.Ping - } - avgPing /= time.Duration(len(asks)) - - for _, ask := range asks { - if ask.Ask.MinPieceSize > ds.PieceSize { - continue - } - if ask.Ask.MaxPieceSize < ds.PieceSize { - continue - } - candidateAsks = append(candidateAsks, ask) - } - - afmt.Printf("Found %d candidate asks\n", len(candidateAsks)) - afmt.Printf("Average network latency: %s; Median latency: %s\n", avgPing.Truncate(time.Millisecond), medianPing.Truncate(time.Millisecond)) - state = "max-ping" - case "max-ping": - maxAcceptablePing = medianPing - - afmt.Printf("Maximum network latency (default: %s) (ms): ", maxAcceptablePing.Truncate(time.Millisecond)) - _latStr, _, err := rl.ReadLine() - latStr := string(_latStr) - if err != nil { - printErr(xerrors.Errorf("reading maximum latency: %w", err)) - continue - } - - if latStr != "" { - maxMs, err := strconv.ParseInt(latStr, 10, 64) - if err != nil { - printErr(xerrors.Errorf("parsing FIL: %w", err)) - continue uiLoop - } - - maxAcceptablePing = time.Millisecond * time.Duration(maxMs) - } - - var goodAsks []QueriedAsk - for _, candidateAsk := range candidateAsks { - if candidateAsk.Ping < maxAcceptablePing { - goodAsks = append(goodAsks, candidateAsk) - } - } - - if 
len(goodAsks) == 0 { - afmt.Printf("no asks left after filtering for network latency\n") - continue uiLoop - } - - afmt.Printf("%d asks left after filtering for network latency\n", len(goodAsks)) - candidateAsks = goodAsks - - state = "find-budget" - case "find-budget": - afmt.Printf("Proposing from %s, Current Balance: %s\n", a, types.FIL(fromBal)) - afmt.Print("Maximum budget (FIL): ") // TODO: Propose some default somehow? - - _budgetStr, _, err := rl.ReadLine() - budgetStr := string(_budgetStr) - if err != nil { - printErr(xerrors.Errorf("reading miner address: %w", err)) - continue - } - - budget, err = types.ParseFIL(budgetStr) - if err != nil { - printErr(xerrors.Errorf("parsing FIL: %w", err)) - continue uiLoop - } - - var goodAsks []QueriedAsk - for _, ask := range candidateAsks { - p := ask.Ask.Price - if verified { - p = ask.Ask.VerifiedPrice - } - - epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) - totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) - - if totalPrice.LessThan(abi.TokenAmount(budget)) { - goodAsks = append(goodAsks, ask) - } - } - candidateAsks = goodAsks - afmt.Printf("%d asks within budget\n", len(candidateAsks)) - state = "find-count" - case "find-count": - afmt.Print("Deals to make (1): ") - dealcStr, _, err := rl.ReadLine() - if err != nil { - printErr(xerrors.Errorf("reading deal count: %w", err)) - continue - } - - dealCount, err = strconv.ParseInt(string(dealcStr), 10, 64) - if err != nil { - printErr(xerrors.Errorf("reading deal count: invalid number")) - continue - } - - color.Blue(".. 
Picking miners") - - // TODO: some better strategy (this tries to pick randomly) - var pickedAsks []*storagemarket.StorageAsk - pickLoop: - for i := 0; i < 64; i++ { - rand.Shuffle(len(candidateAsks), func(i, j int) { - candidateAsks[i], candidateAsks[j] = candidateAsks[j], candidateAsks[i] - }) - - remainingBudget := abi.TokenAmount(budget) - pickedAsks = []*storagemarket.StorageAsk{} - - for _, ask := range candidateAsks { - p := ask.Ask.Price - if verified { - p = ask.Ask.VerifiedPrice - } - - epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) - totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) - - if totalPrice.GreaterThan(remainingBudget) { - continue - } - - pickedAsks = append(pickedAsks, ask.Ask) - remainingBudget = big.Sub(remainingBudget, totalPrice) - - if len(pickedAsks) == int(dealCount) { - break pickLoop - } - } - } - - for _, pickedAsk := range pickedAsks { - maddrs = append(maddrs, pickedAsk.Miner) - ask = append(ask, *pickedAsk) - } - - state = "confirm" - case "query": - color.Blue(".. 
querying miner asks") - - for _, maddr := range maddrs { - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - printErr(xerrors.Errorf("failed to get peerID for miner: %w", err)) - state = "miner" - continue uiLoop - } - - a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr) - if err != nil { - printErr(xerrors.Errorf("failed to query ask for miner %s: %w", maddr.String(), err)) - state = "miner" - continue uiLoop - } - - ask = append(ask, *a.Response) - } - - // TODO: run more validation - state = "confirm" - case "confirm": - // TODO: do some more or epochs math (round to miner PP, deal start buffer) - - afmt.Printf("-----\n") - afmt.Printf("Proposing from %s\n", a) - afmt.Printf("\tBalance: %s\n", types.FIL(fromBal)) - afmt.Printf("\n") - afmt.Printf("Piece size: %s (Payload size: %s)\n", units.BytesSize(float64(ds.PieceSize)), units.BytesSize(float64(ds.PayloadSize))) - afmt.Printf("Duration: %s\n", dur) - - pricePerGib := big.Zero() - for _, a := range ask { - p := a.Price - if verified { - p = a.VerifiedPrice - } - pricePerGib = big.Add(pricePerGib, p) - epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) - epochPrices = append(epochPrices, epochPrice) - - mpow, err := api.StateMinerPower(ctx, a.Miner, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting power (%s): %w", a.Miner, err) - } - - if len(ask) > 1 { - totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) - afmt.Printf("Miner %s (Power:%s) price: ~%s (%s per epoch)\n", color.YellowString(a.Miner.String()), color.GreenString(types.SizeStr(mpow.MinerPower.QualityAdjPower)), color.BlueString(types.FIL(totalPrice).String()), types.FIL(epochPrice)) - } - } - - // TODO: price is based on PaddedPieceSize, right? 
- epochPrice := types.BigDiv(types.BigMul(pricePerGib, types.NewInt(uint64(ds.PieceSize))), gib) - totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs))) - - afmt.Printf("Total price: ~%s (%s per epoch)\n", color.CyanString(types.FIL(totalPrice).String()), types.FIL(epochPrice)) - afmt.Printf("Verified: %v\n", verified) - - state = "accept" - case "accept": - afmt.Print("\nAccept (yes/no): ") - - _yn, _, err := rl.ReadLine() - yn := string(_yn) - if err != nil { - return err - } - - if yn == "no" { - return nil - } - - if yn != "yes" { - afmt.Println("Type in full 'yes' or 'no'") - continue - } - - state = "execute" - case "execute": - color.Blue(".. executing\n") - - for i, maddr := range maddrs { - proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{ - Data: &storagemarket.DataRef{ - TransferType: storagemarket.TTGraphsync, - Root: data, - - PieceCid: &ds.PieceCID, - PieceSize: ds.PieceSize.Unpadded(), - }, - Wallet: a, - Miner: maddr, - EpochPrice: epochPrices[i], - MinBlocksDuration: uint64(epochs), - DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")), - FastRetrieval: cctx.Bool("fast-retrieval"), - VerifiedDeal: verified, - }) - if err != nil { - return err - } - - encoder, err := GetCidEncoder(cctx) - if err != nil { - return err - } - - afmt.Printf("Deal (%s) CID: %s\n", maddr, color.GreenString(encoder.Encode(*proposal))) - } - - return nil - default: - return xerrors.Errorf("unknown state: %s", state) - } - } -} - -var clientFindCmd = &cli.Command{ - Name: "find", - Usage: "Find data in the network", - ArgsUsage: "[dataCid]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "pieceCid", - Usage: "require data to be retrieved from a specific Piece CID", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - file, err := cid.Parse(cctx.Args().First()) - if err != nil { - return err - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - 
defer closer() - ctx := ReqContext(cctx) - - // Check if we already have this data locally - - has, err := api.ClientHasLocal(ctx, file) - if err != nil { - return err - } - - if has { - fmt.Println("LOCAL") - } - - var pieceCid *cid.Cid - if cctx.String("pieceCid") != "" { - parsed, err := cid.Parse(cctx.String("pieceCid")) - if err != nil { - return err - } - pieceCid = &parsed - } - - offers, err := api.ClientFindData(ctx, file, pieceCid) - if err != nil { - return err - } - - for _, offer := range offers { - if offer.Err != "" { - fmt.Printf("ERR %s@%s: %s\n", offer.Miner, offer.MinerPeer.ID, offer.Err) - continue - } - fmt.Printf("RETRIEVAL %s@%s-%s-%s\n", offer.Miner, offer.MinerPeer.ID, types.FIL(offer.MinPrice), types.SizeStr(types.NewInt(offer.Size))) - } - - return nil - }, -} - -var clientQueryRetrievalAskCmd = &cli.Command{ - Name: "retrieval-ask", - Usage: "Get a miner's retrieval ask", - ArgsUsage: "[minerAddress] [data CID]", - Flags: []cli.Flag{ - &cli.Int64Flag{ - Name: "size", - Usage: "data size in bytes", - }, - }, - Action: func(cctx *cli.Context) error { - afmt := NewAppFmt(cctx.App) - if cctx.NArg() != 2 { - return IncorrectNumArgs(cctx) - } - - maddr, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return err - } - - dataCid, err := cid.Parse(cctx.Args().Get(1)) - if err != nil { - return fmt.Errorf("parsing data cid: %w", err) - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - ask, err := api.ClientMinerQueryOffer(ctx, maddr, dataCid, nil) - if err != nil { - return err - } - - afmt.Printf("Ask: %s\n", maddr) - afmt.Printf("Unseal price: %s\n", types.FIL(ask.UnsealPrice)) - afmt.Printf("Price per byte: %s\n", types.FIL(ask.PricePerByte)) - afmt.Printf("Payment interval: %s\n", types.SizeStr(types.NewInt(ask.PaymentInterval))) - afmt.Printf("Payment interval increase: %s\n", types.SizeStr(types.NewInt(ask.PaymentIntervalIncrease))) - - size 
:= cctx.Uint64("size") - if size == 0 { - if ask.Size == 0 { - return nil - } - size = ask.Size - afmt.Printf("Size: %s\n", types.SizeStr(types.NewInt(ask.Size))) - } - transferPrice := types.BigMul(ask.PricePerByte, types.NewInt(size)) - totalPrice := types.BigAdd(ask.UnsealPrice, transferPrice) - afmt.Printf("Total price for %d bytes: %s\n", size, types.FIL(totalPrice)) - - return nil - }, -} - -var clientListRetrievalsCmd = &cli.Command{ - Name: "list-retrievals", - Usage: "List retrieval market deals", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - Usage: "print verbose deal details", - }, - &cli.BoolFlag{ - Name: "show-failed", - Usage: "show failed/failing deals", - Value: true, - }, - &cli.BoolFlag{ - Name: "completed", - Usage: "show completed retrievals", - }, - &cli.BoolFlag{ - Name: "watch", - Usage: "watch deal updates in real-time, rather than a one time list", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - verbose := cctx.Bool("verbose") - watch := cctx.Bool("watch") - showFailed := cctx.Bool("show-failed") - completed := cctx.Bool("completed") - - localDeals, err := api.ClientListRetrievals(ctx) - if err != nil { - return err - } - - if watch { - updates, err := api.ClientGetRetrievalUpdates(ctx) - if err != nil { - return err - } - - for { - tm.Clear() - tm.MoveCursor(1, 1) - - err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, showFailed, completed) - if err != nil { - return err - } - - tm.Flush() - - select { - case <-ctx.Done(): - return nil - case updated := <-updates: - var found bool - for i, existing := range localDeals { - if existing.ID == updated.ID { - localDeals[i] = updated - found = true - break - } - } - if !found { - localDeals = append(localDeals, updated) - } - } - } - } - - return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, 
showFailed, completed) - }, -} - -func isTerminalError(status retrievalmarket.DealStatus) bool { - // should patch this in go-fil-markets but to solve the problem immediate and not have buggy output - return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled -} -func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, showFailed bool, completed bool) error { - var deals []api.RetrievalInfo - for _, deal := range localDeals { - if !showFailed && isTerminalError(deal.Status) { - continue - } - if !completed && retrievalmarket.IsTerminalSuccess(deal.Status) { - continue - } - deals = append(deals, deal) - } - - tableColumns := []tablewriter.Column{ - tablewriter.Col("PayloadCID"), - tablewriter.Col("DealId"), - tablewriter.Col("Provider"), - tablewriter.Col("Status"), - tablewriter.Col("PricePerByte"), - tablewriter.Col("Received"), - tablewriter.Col("TotalPaid"), - } - - if verbose { - tableColumns = append(tableColumns, - tablewriter.Col("PieceCID"), - tablewriter.Col("UnsealPrice"), - tablewriter.Col("BytesPaidFor"), - tablewriter.Col("TransferChannelID"), - tablewriter.Col("TransferStatus"), - ) - } - tableColumns = append(tableColumns, tablewriter.NewLineCol("Message")) - - w := tablewriter.New(tableColumns...) 
- - for _, d := range deals { - w.Write(toRetrievalOutput(d, verbose)) - } - - return w.Flush(out) -} - -func toRetrievalOutput(d api.RetrievalInfo, verbose bool) map[string]interface{} { - - payloadCID := d.PayloadCID.String() - provider := d.Provider.String() - if !verbose { - payloadCID = ellipsis(payloadCID, 8) - provider = ellipsis(provider, 8) - } - - retrievalOutput := map[string]interface{}{ - "PayloadCID": payloadCID, - "DealId": d.ID, - "Provider": provider, - "Status": retrievalStatusString(d.Status), - "PricePerByte": types.FIL(d.PricePerByte), - "Received": units.BytesSize(float64(d.BytesReceived)), - "TotalPaid": types.FIL(d.TotalPaid), - "Message": d.Message, - } - - if verbose { - transferChannelID := "" - if d.TransferChannelID != nil { - transferChannelID = d.TransferChannelID.String() - } - transferStatus := "" - if d.DataTransfer != nil { - transferStatus = datatransfer.Statuses[d.DataTransfer.Status] - } - pieceCID := "" - if d.PieceCID != nil { - pieceCID = d.PieceCID.String() - } - - retrievalOutput["PieceCID"] = pieceCID - retrievalOutput["UnsealPrice"] = types.FIL(d.UnsealPrice) - retrievalOutput["BytesPaidFor"] = units.BytesSize(float64(d.BytesPaidFor)) - retrievalOutput["TransferChannelID"] = transferChannelID - retrievalOutput["TransferStatus"] = transferStatus - } - return retrievalOutput -} - -func retrievalStatusString(status retrievalmarket.DealStatus) string { - s := retrievalmarket.DealStatuses[status] - - switch { - case isTerminalError(status): - return color.RedString(s) - case retrievalmarket.IsTerminalSuccess(status): - return color.GreenString(s) - default: - return s - } -} - -var clientInspectDealCmd = &cli.Command{ - Name: "inspect-deal", - Usage: "Inspect detailed information about deal's lifecycle and the various stages it goes through", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "deal-id", - }, - &cli.StringFlag{ - Name: "proposal-cid", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := 
GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := ReqContext(cctx) - return inspectDealCmd(ctx, api, cctx.String("proposal-cid"), cctx.Int("deal-id")) - }, -} - -var clientDealStatsCmd = &cli.Command{ - Name: "deal-stats", - Usage: "Print statistics about local storage deals", - Flags: []cli.Flag{ - &cli.DurationFlag{ - Name: "newer-than", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - localDeals, err := api.ClientListDeals(ctx) - if err != nil { - return err - } - - var totalSize uint64 - byState := map[storagemarket.StorageDealStatus][]uint64{} - for _, deal := range localDeals { - if cctx.IsSet("newer-than") { - if time.Now().Sub(deal.CreationTime) > cctx.Duration("newer-than") { - continue - } - } - - totalSize += deal.Size - byState[deal.State] = append(byState[deal.State], deal.Size) - } - - fmt.Printf("Total: %d deals, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize))) - - type stateStat struct { - state storagemarket.StorageDealStatus - count int - bytes uint64 - } - - stateStats := make([]stateStat, 0, len(byState)) - for state, deals := range byState { - if state == storagemarket.StorageDealActive { - state = math.MaxUint64 // for sort - } - - st := stateStat{ - state: state, - count: len(deals), - } - for _, b := range deals { - st.bytes += b - } - - stateStats = append(stateStats, st) - } - - sort.Slice(stateStats, func(i, j int) bool { - return int64(stateStats[i].state) < int64(stateStats[j].state) - }) - - for _, st := range stateStats { - if st.state == math.MaxUint64 { - st.state = storagemarket.StorageDealActive - } - fmt.Printf("%s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes))) - } - - return nil - }, -} - -var clientListAsksCmd = &cli.Command{ - Name: "list-asks", - Usage: "List asks for top miners", - Flags: 
[]cli.Flag{ - &cli.BoolFlag{ - Name: "by-ping", - Usage: "sort by ping", - }, - &cli.StringFlag{ - Name: "output-format", - Value: "text", - Usage: "Either 'text' or 'csv'", - }, - &cli.BoolFlag{ - Name: "protocols", - Usage: "Output supported deal protocols", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPIV1(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - asks, err := GetAsks(ctx, api) - if err != nil { - return err - } - - if cctx.Bool("by-ping") { - sort.Slice(asks, func(i, j int) bool { - return asks[i].Ping < asks[j].Ping - }) - } - pfmt := "%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s protos:%s\n" - if cctx.String("output-format") == "csv" { - fmt.Printf("Miner,Min,Max,Price,VerifiedPrice,Ping,Protocols") - pfmt = "%s,%s,%s,%s,%s,%s,%s\n" - } - - for _, a := range asks { - ask := a.Ask - - protos := "" - if cctx.Bool("protocols") { - protos = "[" + strings.Join(a.DealProtocols, ",") + "]" - } - - fmt.Printf(pfmt, ask.Miner, - types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), - types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), - types.FIL(ask.Price), - types.FIL(ask.VerifiedPrice), - a.Ping, - protos, - ) - } - - return nil - }, -} - -type QueriedAsk struct { - Ask *storagemarket.StorageAsk - DealProtocols []string - - Ping time.Duration -} - -func GetAsks(ctx context.Context, api lapi.FullNode) ([]QueriedAsk, error) { - isTTY := true - if fileInfo, _ := os.Stdout.Stat(); (fileInfo.Mode() & os.ModeCharDevice) == 0 { - isTTY = false - } - if isTTY { - color.Blue(".. 
getting miner list") - } - miners, err := api.StateListMiners(ctx, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("getting miner list: %w", err) - } - - var lk sync.Mutex - var found int64 - var withMinPower []address.Address - done := make(chan struct{}) - - go func() { - defer close(done) - - var wg sync.WaitGroup - wg.Add(len(miners)) - - throttle := make(chan struct{}, 50) - for _, miner := range miners { - throttle <- struct{}{} - go func(miner address.Address) { - defer wg.Done() - defer func() { - <-throttle - }() - - power, err := api.StateMinerPower(ctx, miner, types.EmptyTSK) - if err != nil { - return - } - - if power.HasMinPower { // TODO: Lower threshold - atomic.AddInt64(&found, 1) - lk.Lock() - withMinPower = append(withMinPower, miner) - lk.Unlock() - } - }(miner) - } - - wg.Wait() - }() - -loop: - for { - select { - case <-time.After(150 * time.Millisecond): - if isTTY { - fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found)) - } - case <-done: - break loop - } - } - if isTTY { - fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found)) - - color.Blue(".. 
querying asks") - } - - var asks []QueriedAsk - var queried, got int64 - - done = make(chan struct{}) - go func() { - defer close(done) - - var wg sync.WaitGroup - wg.Add(len(withMinPower)) - - throttle := make(chan struct{}, 50) - for _, miner := range withMinPower { - throttle <- struct{}{} - go func(miner address.Address) { - defer wg.Done() - defer func() { - <-throttle - atomic.AddInt64(&queried, 1) - }() - - ctx, cancel := context.WithTimeout(ctx, 4*time.Second) - defer cancel() - - mi, err := api.StateMinerInfo(ctx, miner, types.EmptyTSK) - if err != nil { - return - } - if mi.PeerId == nil { - return - } - - ask, err := api.ClientQueryAsk(ctx, *mi.PeerId, miner) - if err != nil { - return - } - - rt := time.Now() - _, err = api.ClientQueryAsk(ctx, *mi.PeerId, miner) - if err != nil { - return - } - pingDuration := time.Now().Sub(rt) - - atomic.AddInt64(&got, 1) - lk.Lock() - asks = append(asks, QueriedAsk{ - Ask: ask.Response, - DealProtocols: ask.DealProtocols, - - Ping: pingDuration, - }) - lk.Unlock() - }(miner) - } - - wg.Wait() - }() - -loop2: - for { - select { - case <-time.After(150 * time.Millisecond): - if isTTY { - fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) - } - case <-done: - break loop2 - } - } - if isTTY { - fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) - } - - sort.Slice(asks, func(i, j int) bool { - return asks[i].Ask.Price.LessThan(asks[j].Ask.Price) - }) - - return asks, nil -} - -var clientQueryAskCmd = &cli.Command{ - Name: "query-ask", - Usage: "Find a miners ask", - ArgsUsage: "[minerAddress]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "peerid", - Usage: "specify peer ID of node to make query against", - }, - &cli.Int64Flag{ - Name: "size", - Usage: "data size in bytes", - }, - &cli.Int64Flag{ - Name: "duration", - Usage: "deal duration", - }, - }, - Action: func(cctx *cli.Context) error { - afmt := 
NewAppFmt(cctx.App) - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - maddr, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return err - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - var pid peer.ID - if pidstr := cctx.String("peerid"); pidstr != "" { - p, err := peer.Decode(pidstr) - if err != nil { - return err - } - pid = p - } else { - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("failed to get peerID for miner: %w", err) - } - - if mi.PeerId == nil || *mi.PeerId == ("SETME") { - return fmt.Errorf("the miner hasn't initialized yet") - } - - pid = *mi.PeerId - } - - ask, err := api.ClientQueryAsk(ctx, pid, maddr) - if err != nil { - return err - } - - afmt.Printf("Ask: %s\n", maddr) - afmt.Printf("Price per GiB: %s\n", types.FIL(ask.Price)) - afmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.VerifiedPrice)) - afmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize)))) - afmt.Printf("Min Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MinPieceSize)))) - - size := cctx.Int64("size") - if size == 0 { - return nil - } - perEpoch := types.BigDiv(types.BigMul(ask.Price, types.NewInt(uint64(size))), types.NewInt(1<<30)) - afmt.Printf("Price per Block: %s\n", types.FIL(perEpoch)) - - duration := cctx.Int64("duration") - if duration == 0 { - return nil - } - afmt.Printf("Total Price: %s\n", types.FIL(types.BigMul(perEpoch, types.NewInt(uint64(duration))))) - - return nil - }, -} - -var clientListDeals = &cli.Command{ - Name: "list-deals", - Usage: "List storage market deals", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - Usage: "print verbose deal details", - }, - &cli.BoolFlag{ - Name: "show-failed", - Usage: "show failed/failing deals", - }, - &cli.BoolFlag{ - Name: "watch", - Usage: "watch deal updates in 
real-time, rather than a one time list", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - verbose := cctx.Bool("verbose") - watch := cctx.Bool("watch") - showFailed := cctx.Bool("show-failed") - - localDeals, err := api.ClientListDeals(ctx) - if err != nil { - return err - } - - if watch { - updates, err := api.ClientGetDealUpdates(ctx) - if err != nil { - return err - } - - for { - tm.Clear() - tm.MoveCursor(1, 1) - - err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, showFailed) - if err != nil { - return err - } - - tm.Flush() - - select { - case <-ctx.Done(): - return nil - case updated := <-updates: - var found bool - for i, existing := range localDeals { - if existing.ProposalCid.Equals(updated.ProposalCid) { - localDeals[i] = updated - found = true - break - } - } - if !found { - localDeals = append(localDeals, updated) - } - } - } - } - - return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, showFailed) - }, -} - -func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipSet, v api.DealInfo) deal { - if v.DealID == 0 { - return deal{ - LocalDeal: v, - OnChainDealState: market.EmptyDealState(), - } - } - - onChain, err := full.StateMarketStorageDeal(ctx, v.DealID, head.Key()) - if err != nil { - return deal{LocalDeal: v} - } - - return deal{ - LocalDeal: v, - OnChainDealState: onChain.State.Iface(), - } -} - -func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, showFailed bool) error { - sort.Slice(localDeals, func(i, j int) bool { - return localDeals[i].CreationTime.Before(localDeals[j].CreationTime) - }) - - head, err := full.ChainHead(ctx) - if err != nil { - return err - } - - var deals []deal - for _, localDeal := range localDeals { - if showFailed || localDeal.State != storagemarket.StorageDealError { 
- deals = append(deals, dealFromDealInfo(ctx, full, head, localDeal)) - } - } - - if verbose { - w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0) - fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tTransferChannelID\tTransferStatus\tVerified\tMessage\n") - for _, d := range deals { - onChain := "N" - if d.OnChainDealState.SectorStartEpoch() != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) - } - - slashed := "N" - if d.OnChainDealState.SlashEpoch() != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) - } - - price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration))) - transferChannelID := "" - if d.LocalDeal.TransferChannelID != nil { - transferChannelID = d.LocalDeal.TransferChannelID.String() - } - transferStatus := "" - if d.LocalDeal.DataTransfer != nil { - transferStatus = datatransfer.Statuses[d.LocalDeal.DataTransfer.Status] - // TODO: Include the transferred percentage once this bug is fixed: - // https://github.com/ipfs/go-graphsync/issues/126 - //fmt.Printf("transferred: %d / size: %d\n", d.LocalDeal.DataTransfer.Transferred, d.LocalDeal.Size) - //if d.LocalDeal.Size > 0 { - // pct := (100 * d.LocalDeal.DataTransfer.Transferred) / d.LocalDeal.Size - // transferPct = fmt.Sprintf("%d%%", pct) - //} - } - fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%v\t%s\n", - d.LocalDeal.CreationTime.Format(time.Stamp), - d.LocalDeal.ProposalCid, - d.LocalDeal.DealID, - d.LocalDeal.Provider, - dealStateString(d.LocalDeal.State), - onChain, - slashed, - d.LocalDeal.PieceCID, - types.SizeStr(types.NewInt(d.LocalDeal.Size)), - price, - d.LocalDeal.Duration, - transferChannelID, - transferStatus, - d.LocalDeal.Verified, - d.LocalDeal.Message) - } - return w.Flush() - } - - w := tablewriter.New(tablewriter.Col("DealCid"), - tablewriter.Col("DealId"), - tablewriter.Col("Provider"), - 
tablewriter.Col("State"), - tablewriter.Col("On Chain?"), - tablewriter.Col("Slashed?"), - tablewriter.Col("PieceCID"), - tablewriter.Col("Size"), - tablewriter.Col("Price"), - tablewriter.Col("Duration"), - tablewriter.Col("Verified"), - tablewriter.NewLineCol("Message")) - - for _, d := range deals { - propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8) - - onChain := "N" - if d.OnChainDealState.SectorStartEpoch() != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) - } - - slashed := "N" - if d.OnChainDealState.SlashEpoch() != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) - } - - piece := ellipsis(d.LocalDeal.PieceCID.String(), 8) - - price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration))) - - w.Write(map[string]interface{}{ - "DealCid": propcid, - "DealId": d.LocalDeal.DealID, - "Provider": d.LocalDeal.Provider, - "State": dealStateString(d.LocalDeal.State), - "On Chain?": onChain, - "Slashed?": slashed, - "PieceCID": piece, - "Size": types.SizeStr(types.NewInt(d.LocalDeal.Size)), - "Price": price, - "Verified": d.LocalDeal.Verified, - "Duration": d.LocalDeal.Duration, - "Message": d.LocalDeal.Message, - }) - } - - return w.Flush(out) -} - -func dealStateString(state storagemarket.StorageDealStatus) string { - s := storagemarket.DealStates[state] - switch state { - case storagemarket.StorageDealError, storagemarket.StorageDealExpired: - return color.RedString(s) - case storagemarket.StorageDealActive: - return color.GreenString(s) - default: - return s - } -} - -type deal struct { - LocalDeal lapi.DealInfo - OnChainDealState market.DealState -} - -var clientGetDealCmd = &cli.Command{ - Name: "get-deal", - Usage: "Print detailed deal information", - ArgsUsage: "[proposalCID]", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - 
} - defer closer() - ctx := ReqContext(cctx) - - propcid, err := cid.Decode(cctx.Args().First()) - if err != nil { - return err - } - - di, err := api.ClientGetDealInfo(ctx, propcid) - if err != nil { - return err - } - - out := map[string]interface{}{ - "DealInfo: ": di, - } - - if di.DealID != 0 { - onChain, err := api.StateMarketStorageDeal(ctx, di.DealID, types.EmptyTSK) - if err != nil { - return err - } - - out["OnChain"] = onChain - } - - b, err := json.MarshalIndent(out, "", " ") - if err != nil { - return err - } - fmt.Println(string(b)) - return nil - }, -} - -var clientBalancesCmd = &cli.Command{ - Name: "balances", - Usage: "Print storage market client balances", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "client", - Usage: "specify storage client address", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - var addr address.Address - if clientFlag := cctx.String("client"); clientFlag != "" { - ca, err := address.NewFromString(clientFlag) - if err != nil { - return err - } - - addr = ca - } else { - def, err := api.WalletDefaultAddress(ctx) - if err != nil { - return err - } - addr = def - } - - balance, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK) - if err != nil { - return err - } - - reserved, err := api.MarketGetReserved(ctx, addr) - if err != nil { - return err - } - - avail := big.Sub(big.Sub(balance.Escrow, balance.Locked), reserved) - if avail.LessThan(big.Zero()) { - avail = big.Zero() - } - - fmt.Printf("Client Market Balance for address %s:\n", addr) - - fmt.Printf(" Escrowed Funds: %s\n", types.FIL(balance.Escrow)) - fmt.Printf(" Locked Funds: %s\n", types.FIL(balance.Locked)) - fmt.Printf(" Reserved Funds: %s\n", types.FIL(reserved)) - fmt.Printf(" Available to Withdraw: %s\n", types.FIL(avail)) - - return nil - }, -} - -var clientStat = &cli.Command{ - Name: "stat", - Usage: "Print information about 
a locally stored file (piece size, etc)", - ArgsUsage: "", - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - dataCid, err := cid.Parse(cctx.Args().First()) - if err != nil { - return fmt.Errorf("parsing data cid: %w", err) - } - - ds, err := api.ClientDealSize(ctx, dataCid) - if err != nil { - return err - } - - fmt.Printf("Piece Size : %v\n", ds.PieceSize) - fmt.Printf("Payload Size: %v\n", ds.PayloadSize) - - return nil - }, -} - -var clientRestartTransfer = &cli.Command{ - Name: "restart-transfer", - Usage: "Force restart a stalled data transfer", - ArgsUsage: "[transferID]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "peerid", - Usage: "narrow to transfer with specific peer", - }, - &cli.BoolFlag{ - Name: "initiator", - Usage: "specify only transfers where peer is/is not initiator", - Value: true, - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) - if err != nil { - return fmt.Errorf("Error reading transfer ID: %w", err) - } - transferID := datatransfer.TransferID(transferUint) - initiator := cctx.Bool("initiator") - var other peer.ID - if pidstr := cctx.String("peerid"); pidstr != "" { - p, err := peer.Decode(pidstr) - if err != nil { - return err - } - other = p - } else { - channels, err := api.ClientListDataTransfers(ctx) - if err != nil { - return err - } - found := false - for _, channel := range channels { - if channel.IsInitiator == initiator && channel.TransferID == transferID { - other = channel.OtherPeer - found = true - break - } - } - if !found { - return errors.New("unable to find matching data transfer") - } - } - - 
return api.ClientRestartDataTransfer(ctx, transferID, other, initiator) - }, -} - -var clientCancelTransfer = &cli.Command{ - Name: "cancel-transfer", - Usage: "Force cancel a data transfer", - ArgsUsage: "[transferID]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "peerid", - Usage: "narrow to transfer with specific peer", - }, - &cli.BoolFlag{ - Name: "initiator", - Usage: "specify only transfers where peer is/is not initiator", - Value: true, - }, - &cli.DurationFlag{ - Name: "cancel-timeout", - Usage: "time to wait for cancel to be sent to storage provider", - Value: 5 * time.Second, - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) - if err != nil { - return fmt.Errorf("Error reading transfer ID: %w", err) - } - transferID := datatransfer.TransferID(transferUint) - initiator := cctx.Bool("initiator") - var other peer.ID - if pidstr := cctx.String("peerid"); pidstr != "" { - p, err := peer.Decode(pidstr) - if err != nil { - return err - } - other = p - } else { - channels, err := api.ClientListDataTransfers(ctx) - if err != nil { - return err - } - found := false - for _, channel := range channels { - if channel.IsInitiator == initiator && channel.TransferID == transferID { - other = channel.OtherPeer - found = true - break - } - } - if !found { - return errors.New("unable to find matching data transfer") - } - } - - timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout")) - defer cancel() - return api.ClientCancelDataTransfer(timeoutCtx, transferID, other, initiator) - }, -} - -var clientCancelRetrievalDealCmd = &cli.Command{ - Name: "cancel-retrieval", - Usage: "Cancel a retrieval deal by deal ID; this also cancels the associated transfer", - Flags: []cli.Flag{ - &cli.Int64Flag{ - 
Name: "deal-id", - Usage: "specify retrieval deal by deal ID", - Required: true, - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - id := cctx.Int64("deal-id") - if id < 0 { - return errors.New("deal id cannot be negative") - } - - return api.ClientCancelRetrievalDeal(ctx, retrievalmarket.DealID(id)) - }, -} - -var clientListTransfers = &cli.Command{ - Name: "list-transfers", - Usage: "List ongoing data transfers for deals", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - Usage: "print verbose transfer details", - }, - &cli.BoolFlag{ - Name: "completed", - Usage: "show completed data transfers", - }, - &cli.BoolFlag{ - Name: "watch", - Usage: "watch deal updates in real-time, rather than a one time list", - }, - &cli.BoolFlag{ - Name: "show-failed", - Usage: "show failed/cancelled transfers", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - channels, err := api.ClientListDataTransfers(ctx) - if err != nil { - return err - } - - verbose := cctx.Bool("verbose") - completed := cctx.Bool("completed") - watch := cctx.Bool("watch") - showFailed := cctx.Bool("show-failed") - if watch { - channelUpdates, err := api.ClientDataTransferUpdates(ctx) - if err != nil { - return err - } - - for { - tm.Clear() // Clear current screen - - tm.MoveCursor(1, 1) - - OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) - - tm.Flush() - - select { - case <-ctx.Done(): - return nil - case channelUpdate := <-channelUpdates: - var found bool - for i, existing := range channels { - if existing.TransferID == channelUpdate.TransferID && - existing.OtherPeer == channelUpdate.OtherPeer && - existing.IsSender == channelUpdate.IsSender && - existing.IsInitiator == 
channelUpdate.IsInitiator { - channels[i] = channelUpdate - found = true - break - } - } - if !found { - channels = append(channels, channelUpdate) - } - } - } - } - OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) - return nil - }, -} - -// OutputDataTransferChannels generates table output for a list of channels -func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, showFailed bool) { - sort.Slice(channels, func(i, j int) bool { - return channels[i].TransferID < channels[j].TransferID - }) - - var receivingChannels, sendingChannels []lapi.DataTransferChannel - for _, channel := range channels { - if !completed && channel.Status == datatransfer.Completed { - continue - } - if !showFailed && (channel.Status == datatransfer.Failed || channel.Status == datatransfer.Cancelled) { - continue - } - if channel.IsSender { - sendingChannels = append(sendingChannels, channel) - } else { - receivingChannels = append(receivingChannels, channel) - } - } - - fmt.Fprintf(out, "Sending Channels\n\n") - w := tablewriter.New(tablewriter.Col("ID"), - tablewriter.Col("Status"), - tablewriter.Col("Sending To"), - tablewriter.Col("Root Cid"), - tablewriter.Col("Initiated?"), - tablewriter.Col("Transferred"), - tablewriter.Col("Voucher"), - tablewriter.NewLineCol("Message")) - for _, channel := range sendingChannels { - w.Write(toChannelOutput("Sending To", channel, verbose)) - } - _ = w.Flush(out) - - fmt.Fprintf(out, "\nReceiving Channels\n\n") - w = tablewriter.New(tablewriter.Col("ID"), - tablewriter.Col("Status"), - tablewriter.Col("Receiving From"), - tablewriter.Col("Root Cid"), - tablewriter.Col("Initiated?"), - tablewriter.Col("Transferred"), - tablewriter.Col("Voucher"), - tablewriter.NewLineCol("Message")) - for _, channel := range receivingChannels { - w.Write(toChannelOutput("Receiving From", channel, verbose)) - } - _ = w.Flush(out) -} - -func channelStatusString(status datatransfer.Status) 
string { - s := datatransfer.Statuses[status] - switch status { - case datatransfer.Failed, datatransfer.Cancelled: - return color.RedString(s) - case datatransfer.Completed: - return color.GreenString(s) - default: - return s - } -} - -func toChannelOutput(otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { - rootCid := channel.BaseCID.String() - otherParty := channel.OtherPeer.String() - if !verbose { - rootCid = ellipsis(rootCid, 8) - otherParty = ellipsis(otherParty, 8) - } - - initiated := "N" - if channel.IsInitiator { - initiated = "Y" - } - - voucher := channel.Voucher - if len(voucher) > 40 && !verbose { - voucher = ellipsis(voucher, 37) - } - - return map[string]interface{}{ - "ID": channel.TransferID, - "Status": channelStatusString(channel.Status), - otherPartyColumn: otherParty, - "Root Cid": rootCid, - "Initiated?": initiated, - "Transferred": units.BytesSize(float64(channel.Transferred)), - "Voucher": voucher, - "Message": channel.Message, - } -} - -func ellipsis(s string, length int) string { - if length > 0 && len(s) > length { - return "..." 
+ s[len(s)-length:] - } - return s -} - -func inspectDealCmd(ctx context.Context, api v0api.FullNode, proposalCid string, dealId int) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - deals, err := api.ClientListDeals(ctx) - if err != nil { - return err - } - - var di *lapi.DealInfo - for i, cdi := range deals { - if proposalCid != "" && cdi.ProposalCid.String() == proposalCid { - di = &deals[i] - break - } - - if dealId != 0 && int(cdi.DealID) == dealId { - di = &deals[i] - break - } - } - - if di == nil { - if proposalCid != "" { - return fmt.Errorf("cannot find deal with proposal cid: %s", proposalCid) - } - if dealId != 0 { - return fmt.Errorf("cannot find deal with deal id: %v", dealId) - } - return errors.New("you must specify proposal cid or deal id in order to inspect a deal") - } - - // populate DealInfo.DealStages and DataTransfer.Stages - di, err = api.ClientGetDealInfo(ctx, di.ProposalCid) - if err != nil { - return fmt.Errorf("cannot get deal info for proposal cid: %v", di.ProposalCid) - } - - renderDeal(di) - - return nil -} - -func renderDeal(di *lapi.DealInfo) { - color.Blue("Deal ID: %d\n", int(di.DealID)) - color.Blue("Proposal CID: %s\n\n", di.ProposalCid.String()) - - if di.DealStages == nil { - color.Yellow("Deal was made with an older version of Lotus and Lotus did not collect detailed information about its stages") - return - } - - for _, stg := range di.DealStages.Stages { - msg := fmt.Sprintf("%s %s: %s (expected duration: %s)", color.BlueString("Stage:"), color.BlueString(strings.TrimPrefix(stg.Name, "StorageDeal")), stg.Description, color.GreenString(stg.ExpectedDuration)) - if stg.UpdatedTime.Time().IsZero() { - msg = color.YellowString(msg) - } - fmt.Println(msg) - - for _, l := range stg.Logs { - fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log) - } - - if stg.Name == "StorageDealStartDataTransfer" { - for _, dtStg := range 
di.DataTransfer.Stages.Stages { - fmt.Printf(" %s %s %s\n", color.YellowString(dtStg.CreatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), color.BlueString("Data transfer stage:"), color.BlueString(dtStg.Name)) - for _, l := range dtStg.Logs { - fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log) - } - } - } - } -} diff --git a/cli/client_retr.go b/cli/client_retr.go deleted file mode 100644 index fa8164ab5ef..00000000000 --- a/cli/client_retr.go +++ /dev/null @@ -1,550 +0,0 @@ -package cli - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "sort" - "strings" - "time" - - "github.com/ipfs/boxo/blockservice" - offline "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - carv2 "github.com/ipld/go-car/v2" - "github.com/ipld/go-car/v2/blockstore" - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/codec/dagjson" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" - textselector "github.com/ipld/go-ipld-selector-text-lite" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-state-types/big" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/markets/utils" - "github.com/filecoin-project/lotus/node/repo" -) - -const DefaultMaxRetrievePrice = "0" - -func retrieve(ctx context.Context, cctx *cli.Context, fapi lapi.FullNode, sel *lapi.Selector, printf func(string, ...interface{})) (*lapi.ExportRef, error) { - var 
payer address.Address - var err error - if cctx.String("from") != "" { - payer, err = address.NewFromString(cctx.String("from")) - } else { - payer, err = fapi.WalletDefaultAddress(ctx) - } - if err != nil { - return nil, err - } - - file, err := cid.Parse(cctx.Args().Get(0)) - if err != nil { - return nil, err - } - - var pieceCid *cid.Cid - if cctx.String("pieceCid") != "" { - parsed, err := cid.Parse(cctx.String("pieceCid")) - if err != nil { - return nil, err - } - pieceCid = &parsed - } - - var eref *lapi.ExportRef - if cctx.Bool("allow-local") { - imports, err := fapi.ClientListImports(ctx) - if err != nil { - return nil, err - } - - for _, i := range imports { - if i.Root != nil && i.Root.Equals(file) { - eref = &lapi.ExportRef{ - Root: file, - FromLocalCAR: i.CARPath, - } - break - } - } - } - - // no local found, so make a retrieval - if eref == nil { - var offer lapi.QueryOffer - minerStrAddr := cctx.String("provider") - if minerStrAddr == "" { // Local discovery - offers, err := fapi.ClientFindData(ctx, file, pieceCid) - - var cleaned []lapi.QueryOffer - // filter out offers that errored - for _, o := range offers { - if o.Err == "" { - cleaned = append(cleaned, o) - } - } - - offers = cleaned - - // sort by price low to high - sort.Slice(offers, func(i, j int) bool { - return offers[i].MinPrice.LessThan(offers[j].MinPrice) - }) - if err != nil { - return nil, err - } - - // TODO: parse offer strings from `client find`, make this smarter - if len(offers) < 1 { - fmt.Println("Failed to find file") - return nil, nil - } - offer = offers[0] - } else { // Directed retrieval - minerAddr, err := address.NewFromString(minerStrAddr) - if err != nil { - return nil, err - } - offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) - if err != nil { - return nil, err - } - } - if offer.Err != "" { - return nil, fmt.Errorf("offer error: %s", offer.Err) - } - - maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) - - if cctx.String("maxPrice") != 
"" { - maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) - if err != nil { - return nil, xerrors.Errorf("parsing maxPrice: %w", err) - } - } - - if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { - return nil, xerrors.Errorf("failed to find offer satisfying maxPrice: %s. Try increasing maxPrice", maxPrice) - } - - o := offer.Order(payer) - o.DataSelector = sel - - subscribeEvents, err := fapi.ClientGetRetrievalUpdates(ctx) - if err != nil { - return nil, xerrors.Errorf("error setting up retrieval updates: %w", err) - } - retrievalRes, err := fapi.ClientRetrieve(ctx, o) - if err != nil { - return nil, xerrors.Errorf("error setting up retrieval: %w", err) - } - - start := time.Now() - readEvents: - for { - var evt lapi.RetrievalInfo - select { - case <-ctx.Done(): - return nil, xerrors.New("Retrieval Timed Out") - case evt = <-subscribeEvents: - if evt.ID != retrievalRes.DealID { - // we can't check the deal ID ahead of time because: - // 1. We need to subscribe before retrieving. - // 2. We won't know the deal ID until after retrieving. 
- continue - } - } - - event := "New" - if evt.Event != nil { - event = retrievalmarket.ClientEvents[*evt.Event] - } - - printf("Recv %s, Paid %s, %s (%s), %s [%d|%d]\n", - types.SizeStr(types.NewInt(evt.BytesReceived)), - types.FIL(evt.TotalPaid), - strings.TrimPrefix(event, "ClientEvent"), - strings.TrimPrefix(retrievalmarket.DealStatuses[evt.Status], "DealStatus"), - time.Now().Sub(start).Truncate(time.Millisecond), - evt.ID, - types.NewInt(evt.BytesReceived), - ) - - switch evt.Status { - case retrievalmarket.DealStatusCompleted: - break readEvents - case retrievalmarket.DealStatusRejected: - return nil, xerrors.Errorf("Retrieval Proposal Rejected: %s", evt.Message) - case retrievalmarket.DealStatusCancelled: - return nil, xerrors.Errorf("Retrieval Proposal Cancelled: %s", evt.Message) - case - retrievalmarket.DealStatusDealNotFound, - retrievalmarket.DealStatusErrored: - return nil, xerrors.Errorf("Retrieval Error: %s", evt.Message) - } - } - - eref = &lapi.ExportRef{ - Root: file, - DealID: retrievalRes.DealID, - } - } - - return eref, nil -} - -var retrFlagsCommon = []cli.Flag{ - &cli.StringFlag{ - Name: "from", - Usage: "address to send transactions from", - }, - &cli.StringFlag{ - Name: "provider", - Usage: "provider to use for retrieval, if not present it'll use local discovery", - Aliases: []string{"miner"}, - }, - &cli.StringFlag{ - Name: "maxPrice", - Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), - }, - &cli.StringFlag{ - Name: "pieceCid", - Usage: "require data to be retrieved from a specific Piece CID", - }, - &cli.BoolFlag{ - Name: "allow-local", - // todo: default to true? - }, -} - -var clientRetrieveCmd = &cli.Command{ - Name: "retrieve", - Usage: "Retrieve data from network", - ArgsUsage: "[dataCid outputPath]", - Description: `Retrieve data from the Filecoin network. - -The retrieve command will attempt to find a provider make a retrieval deal with -them. 
In case a provider can't be found, it can be specified with the --provider -flag. - -By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively -a CAR file containing the raw IPLD graph can be exported by setting the --car -flag. - -Partial Retrieval: - -The --data-selector flag can be used to specify a sub-graph to fetch. The -selector can be specified as either IPLD datamodel text-path selector, or IPLD -json selector. - -In case of unixfs retrieval, the selector must point at a single root node, and -match the entire graph under that node. - -In case of CAR retrieval, the selector must have one common "sub-root" node. - -Examples: - -- Retrieve a file by CID - $ lotus client retrieve Qm... my-file.txt - -- Retrieve a file by CID from f0123 - $ lotus client retrieve --provider f0123 Qm... my-file.txt - -- Retrieve a first file from a specified directory - $ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt -`, - Flags: append([]cli.Flag{ - &cli.BoolFlag{ - Name: "car", - Usage: "Export to a car file instead of a regular file", - }, - &cli.StringFlag{ - Name: "data-selector", - Aliases: []string{"datamodel-path-selector"}, - Usage: "IPLD datamodel text-path selector, or IPLD json selector", - }, - &cli.BoolFlag{ - Name: "car-export-merkle-proof", - Usage: "(requires --data-selector and --car) Export data-selector merkle proof", - }, - }, retrFlagsCommon...), - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 2 { - return IncorrectNumArgs(cctx) - } - - if cctx.Bool("car-export-merkle-proof") { - if !cctx.Bool("car") || !cctx.IsSet("data-selector") { - return ShowHelp(cctx, fmt.Errorf("--car-export-merkle-proof requires --car and --data-selector")) - } - } - - fapi, closer, err := GetFullNodeAPIV1(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - afmt := NewAppFmt(cctx.App) - - var s *lapi.Selector - if sel := lapi.Selector(cctx.String("data-selector")); sel != "" { - s = &sel - } 
- - eref, err := retrieve(ctx, cctx, fapi, s, afmt.Printf) - if err != nil { - return err - } - if eref == nil { - return xerrors.Errorf("failed to find providers") - } - - if s != nil { - eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: s, ExportMerkleProof: cctx.Bool("car-export-merkle-proof")}) - } - - err = fapi.ClientExport(ctx, *eref, lapi.FileRef{ - Path: cctx.Args().Get(1), - IsCAR: cctx.Bool("car"), - }) - if err != nil { - return err - } - afmt.Println("Success") - return nil - }, -} - -var clientRetrieveCatCmd = &cli.Command{ - Name: "cat", - Usage: "Show data from network", - ArgsUsage: "[dataCid]", - Flags: append([]cli.Flag{ - &cli.BoolFlag{ - Name: "ipld", - Usage: "list IPLD datamodel links", - }, - &cli.StringFlag{ - Name: "data-selector", - Usage: "IPLD datamodel text-path selector, or IPLD json selector", - }, - }, retrFlagsCommon...), - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - ainfo, err := GetAPIInfo(cctx, repo.FullNode) - if err != nil { - return xerrors.Errorf("could not get API info: %w", err) - } - - fapi, closer, err := GetFullNodeAPIV1(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - afmt := NewAppFmt(cctx.App) - - sel := lapi.Selector(cctx.String("data-selector")) - selp := &sel - if sel == "" { - selp = nil - } - - eref, err := retrieve(ctx, cctx, fapi, selp, afmt.Printf) - if err != nil { - return err - } - - fmt.Println() // separate retrieval events from results - - if sel != "" { - eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: &sel}) - } - - rc, err := cliutil.ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false) - if err != nil { - return err - } - defer rc.Close() // nolint - - _, err = io.Copy(os.Stdout, rc) - return err - }, -} - -func pathToSel(psel string, matchTraversal bool, sub builder.SelectorSpec) (lapi.Selector, error) { - rs, err := 
textselector.SelectorSpecFromPath(textselector.Expression(psel), matchTraversal, sub) - if err != nil { - return "", xerrors.Errorf("failed to parse path-selector: %w", err) - } - - var b bytes.Buffer - if err := dagjson.Encode(rs.Node(), &b); err != nil { - return "", err - } - - return lapi.Selector(b.String()), nil -} - -var clientRetrieveLsCmd = &cli.Command{ - Name: "ls", - Usage: "List object links", - ArgsUsage: "[dataCid]", - Flags: append([]cli.Flag{ - &cli.BoolFlag{ - Name: "ipld", - Usage: "list IPLD datamodel links", - }, - &cli.IntFlag{ - Name: "depth", - Usage: "list links recursively up to the specified depth", - Value: 1, - }, - &cli.StringFlag{ - Name: "data-selector", - Usage: "IPLD datamodel text-path selector, or IPLD json selector", - }, - }, retrFlagsCommon...), - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - ainfo, err := GetAPIInfo(cctx, repo.FullNode) - if err != nil { - return xerrors.Errorf("could not get API info: %w", err) - } - - fapi, closer, err := GetFullNodeAPIV1(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - afmt := NewAppFmt(cctx.App) - - dataSelector := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) - - if cctx.IsSet("data-selector") { - ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - dataSelector, err = pathToSel(cctx.String("data-selector"), cctx.Bool("ipld"), - ssb.ExploreUnion( - ssb.Matcher(), - ssb.ExploreAll( - ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), - ))) - if err != nil { - return xerrors.Errorf("parsing datamodel path: %w", err) - } - } - - eref, err := retrieve(ctx, cctx, fapi, &dataSelector, afmt.Printf) - if err != nil { - return xerrors.Errorf("retrieve: %w", err) - } - - fmt.Println() // separate retrieval events from 
results - - eref.DAGs = append(eref.DAGs, lapi.DagSpec{ - DataSelector: &dataSelector, - }) - - rc, err := cliutil.ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true) - if err != nil { - return xerrors.Errorf("export: %w", err) - } - defer rc.Close() // nolint - - var memcar bytes.Buffer - _, err = io.Copy(&memcar, rc) - if err != nil { - return err - } - - cbs, err := blockstore.NewReadOnly(&bytesReaderAt{bytes.NewReader(memcar.Bytes())}, nil, - carv2.ZeroLengthSectionAsEOF(true), - blockstore.UseWholeCIDs(true)) - if err != nil { - return xerrors.Errorf("opening car blockstore: %w", err) - } - - roots, err := cbs.Roots() - if err != nil { - return xerrors.Errorf("getting roots: %w", err) - } - - if len(roots) != 1 { - return xerrors.Errorf("expected 1 car root, got %d", len(roots)) - } - dserv := merkledag.NewDAGService(blockservice.New(cbs, offline.Exchange(cbs))) - - if !cctx.Bool("ipld") { - links, err := dserv.GetLinks(ctx, roots[0]) - if err != nil { - return xerrors.Errorf("getting links: %w", err) - } - - for _, link := range links { - fmt.Printf("%s %s\t%d\n", link.Cid, link.Name, link.Size) - } - } else { - jsel := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) - - if cctx.IsSet("data-selector") { - ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - jsel, err = pathToSel(cctx.String("data-selector"), false, - ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), - ) - } - - sel, _ := selectorparse.ParseJSONSelector(string(jsel)) - - if err := utils.TraverseDag( - ctx, - dserv, - roots[0], - sel, - nil, - func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { - if r == traversal.VisitReason_SelectionMatch { - fmt.Println(p.Path) - } - return nil - }, - ); err != nil { - return err - } - } - - return err - }, -} - -type bytesReaderAt struct { - 
btr *bytes.Reader -} - -func (b bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) { - return b.btr.ReadAt(p, off) -} - -var _ io.ReaderAt = &bytesReaderAt{} diff --git a/cli/cmd.go b/cli/cmd.go index 76c0ab300a6..9ae8c14b75e 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -54,7 +54,6 @@ var GetFullNodeAPIV1 = cliutil.GetFullNodeAPIV1 var GetGatewayAPI = cliutil.GetGatewayAPI var GetStorageMinerAPI = cliutil.GetStorageMinerAPI -var GetMarketsAPI = cliutil.GetMarketsAPI var GetWorkerAPI = cliutil.GetWorkerAPI var CommonCommands = []*cli.Command{ diff --git a/cli/helper.go b/cli/helper.go index fb1899e0aaf..551422983b9 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -76,15 +76,15 @@ func NewAppFmt(a *ufcli.App) *AppFmt { } func (a *AppFmt) Print(args ...interface{}) { - fmt.Fprint(a.app.Writer, args...) + _, _ = fmt.Fprint(a.app.Writer, args...) } func (a *AppFmt) Println(args ...interface{}) { - fmt.Fprintln(a.app.Writer, args...) + _, _ = fmt.Fprintln(a.app.Writer, args...) } func (a *AppFmt) Printf(fmtstr string, args ...interface{}) { - fmt.Fprintf(a.app.Writer, fmtstr, args...) + _, _ = fmt.Fprintf(a.app.Writer, fmtstr, args...) 
} func (a *AppFmt) Scan(args ...interface{}) (int, error) { diff --git a/cli/info.go b/cli/info.go index 01f64dee9b4..482f01d98f2 100644 --- a/cli/info.go +++ b/cli/info.go @@ -3,9 +3,7 @@ package cli import ( "context" "fmt" - "math" "os" - "sort" "strings" "text/tabwriter" "time" @@ -14,7 +12,6 @@ import ( "github.com/fatih/color" "github.com/urfave/cli/v2" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api/v1api" @@ -155,57 +152,6 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf("Payment Channels: %v channels\n", len(chs)) } fmt.Println() - - localDeals, err := fullapi.ClientListDeals(ctx) - if err != nil { - return err - } - - var totalSize uint64 - byState := map[storagemarket.StorageDealStatus][]uint64{} - for _, deal := range localDeals { - totalSize += deal.Size - byState[deal.State] = append(byState[deal.State], deal.Size) - } - - fmt.Printf("Deals: %d, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize))) - - type stateStat struct { - state storagemarket.StorageDealStatus - count int - bytes uint64 - } - - stateStats := make([]stateStat, 0, len(byState)) - for state, deals := range byState { - if state == storagemarket.StorageDealActive { - state = math.MaxUint64 // for sort - } - - st := stateStat{ - state: state, - count: len(deals), - } - for _, b := range deals { - st.bytes += b - } - - stateStats = append(stateStats, st) - } - - sort.Slice(stateStats, func(i, j int) bool { - return int64(stateStats[i].state) < int64(stateStats[j].state) - }) - - for _, st := range stateStats { - if st.state == math.MaxUint64 { - st.state = storagemarket.StorageDealActive - } - fmt.Printf(" %s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes))) - } - - fmt.Println() - tw := tabwriter.NewWriter(os.Stdout, 6, 6, 2, ' ', 0) s, err := fullapi.NetBandwidthStats(ctx) @@ -214,8 +160,18 @@ func infoCmdAct(cctx 
*cli.Context) error { } fmt.Printf("Bandwidth:\n") - fmt.Fprintf(tw, "\tTotalIn\tTotalOut\tRateIn\tRateOut\n") - fmt.Fprintf(tw, "\t%s\t%s\t%s/s\t%s/s\n", humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + if _, err := fmt.Fprintf(tw, "\tTotalIn\tTotalOut\tRateIn\tRateOut\n"); err != nil { + return err + } + if _, err := fmt.Fprintf( + tw, + "\t%s\t%s\t%s/s\t%s/s\n", + humanize.Bytes(uint64(s.TotalIn)), + humanize.Bytes(uint64(s.TotalOut)), + humanize.Bytes(uint64(s.RateIn)), + humanize.Bytes(uint64(s.RateOut))); err != nil { + return err + } return tw.Flush() } diff --git a/cli/multisig.go b/cli/multisig.go index 290cf6700e2..948d5006661 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -168,7 +168,7 @@ var msigCreateCmd = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "actor creation failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "actor creation failed!") return err } @@ -178,7 +178,7 @@ var msigCreateCmd = &cli.Command{ if err := execreturn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { return err } - fmt.Fprintln(cctx.App.Writer, "Created new multisig: ", execreturn.IDAddress, execreturn.RobustAddress) + _, _ = fmt.Fprintln(cctx.App.Writer, "Created new multisig: ", execreturn.IDAddress, execreturn.RobustAddress) // TODO: maybe register this somewhere return nil @@ -242,25 +242,25 @@ var msigInspectCmd = &cli.Command{ return err } - fmt.Fprintf(cctx.App.Writer, "Balance: %s\n", types.FIL(act.Balance)) - fmt.Fprintf(cctx.App.Writer, "Spendable: %s\n", types.FIL(types.BigSub(act.Balance, locked))) + _, _ = fmt.Fprintf(cctx.App.Writer, "Balance: %s\n", types.FIL(act.Balance)) + _, _ = fmt.Fprintf(cctx.App.Writer, "Spendable: %s\n", types.FIL(types.BigSub(act.Balance, locked))) if cctx.Bool("vesting") { ib, err := mstate.InitialBalance() if err != nil { return err } - 
fmt.Fprintf(cctx.App.Writer, "InitialBalance: %s\n", types.FIL(ib)) + _, _ = fmt.Fprintf(cctx.App.Writer, "InitialBalance: %s\n", types.FIL(ib)) se, err := mstate.StartEpoch() if err != nil { return err } - fmt.Fprintf(cctx.App.Writer, "StartEpoch: %d\n", se) + _, _ = fmt.Fprintf(cctx.App.Writer, "StartEpoch: %d\n", se) ud, err := mstate.UnlockDuration() if err != nil { return err } - fmt.Fprintf(cctx.App.Writer, "UnlockDuration: %d\n", ud) + _, _ = fmt.Fprintf(cctx.App.Writer, "UnlockDuration: %d\n", ud) } signers, err := mstate.Signers() @@ -271,17 +271,17 @@ var msigInspectCmd = &cli.Command{ if err != nil { return err } - fmt.Fprintf(cctx.App.Writer, "Threshold: %d / %d\n", threshold, len(signers)) - fmt.Fprintln(cctx.App.Writer, "Signers:") + _, _ = fmt.Fprintf(cctx.App.Writer, "Threshold: %d / %d\n", threshold, len(signers)) + _, _ = fmt.Fprintln(cctx.App.Writer, "Signers:") signerTable := tabwriter.NewWriter(cctx.App.Writer, 8, 4, 2, ' ', 0) - fmt.Fprintf(signerTable, "ID\tAddress\n") + _, _ = fmt.Fprintf(signerTable, "ID\tAddress\n") for _, s := range signers { signerActor, err := api.StateAccountKey(ctx, s, types.EmptyTSK) if err != nil { - fmt.Fprintf(signerTable, "%s\t%s\n", s, "N/A") + _, _ = fmt.Fprintf(signerTable, "%s\t%s\n", s, "N/A") } else { - fmt.Fprintf(signerTable, "%s\t%s\n", s, signerActor) + _, _ = fmt.Fprintf(signerTable, "%s\t%s\n", s, signerActor) } } if err := signerTable.Flush(); err != nil { @@ -297,7 +297,7 @@ var msigInspectCmd = &cli.Command{ } decParams := cctx.Bool("decode-params") - fmt.Fprintln(cctx.App.Writer, "Transactions: ", len(pending)) + _, _ = fmt.Fprintln(cctx.App.Writer, "Transactions: ", len(pending)) if len(pending) > 0 { var txids []int64 for txid := range pending { @@ -308,7 +308,7 @@ var msigInspectCmd = &cli.Command{ }) w := tabwriter.NewWriter(cctx.App.Writer, 8, 4, 2, ' ', 0) - fmt.Fprintf(w, "ID\tState\tApprovals\tTo\tValue\tMethod\tParams\n") + _, _ = fmt.Fprintf(w, 
"ID\tState\tApprovals\tTo\tValue\tMethod\tParams\n") for _, txid := range txids { tx := pending[txid] target := tx.To.String() @@ -320,9 +320,31 @@ var msigInspectCmd = &cli.Command{ if err != nil { if tx.Method == 0 { - fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "Send", tx.Method, paramStr) + _, _ = fmt.Fprintf( + w, + "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", + txid, + "pending", + len(tx.Approved), + target, + types.FIL(tx.Value), + "Send", + tx.Method, + paramStr, + ) } else { - fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown method", tx.Method, paramStr) + _, _ = fmt.Fprintf( + w, + "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", + txid, + "pending", + len(tx.Approved), + target, + types.FIL(tx.Value), + "new account, unknown method", + tx.Method, + paramStr, + ) } } else { method := consensus.NewActorRegistry().Methods[targAct.Code][tx.Method] // TODO: use remote map @@ -341,7 +363,18 @@ var msigInspectCmd = &cli.Command{ paramStr = string(b) } - fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), method.Name, tx.Method, paramStr) + _, _ = fmt.Fprintf( + w, + "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", + txid, + "pending", + len(tx.Approved), + target, + types.FIL(tx.Value), + method.Name, + tx.Method, + paramStr, + ) } } if err := w.Flush(); err != nil { @@ -923,7 +956,7 @@ var msigAddProposeCmd = &cli.Command{ msgCid := sm.Cid() - fmt.Fprintln(cctx.App.Writer, "sent add proposal in message: ", msgCid) + _, _ = fmt.Fprintln(cctx.App.Writer, "sent add proposal in message: ", msgCid) wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { diff --git a/cli/net.go b/cli/net.go index 99ee92aefc0..249a6c4b7db 100644 --- a/cli/net.go +++ b/cli/net.go @@ -66,7 +66,7 @@ var NetPeers = &cli.Command{ }, }, Action: 
func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -146,7 +146,7 @@ var NetPing = &cli.Command{ return IncorrectNumArgs(cctx) } - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -211,7 +211,7 @@ var NetScores = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -244,7 +244,7 @@ var NetListen = &cli.Command{ Name: "listen", Usage: "List listen addresses", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -268,7 +268,7 @@ var NetDisconnect = &cli.Command{ Usage: "Disconnect from a peer", ArgsUsage: "[peerID]", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -299,7 +299,7 @@ var NetConnect = &cli.Command{ Usage: "Connect to a peer", ArgsUsage: "[peerMultiaddr|minerActorAddress]", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -374,7 +374,7 @@ var NetId = &cli.Command{ Name: "id", Usage: "Get node identity", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -408,7 +408,7 @@ var NetFindPeer = &cli.Command{ return err } - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -431,7 +431,7 @@ var NetReachability = &cli.Command{ Name: "reachability", Usage: "Print information about reachability from the internet", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } 
@@ -466,7 +466,7 @@ var NetBandwidthCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -479,7 +479,7 @@ var NetBandwidthCmd = &cli.Command{ tw := tabwriter.NewWriter(os.Stdout, 4, 4, 2, ' ', 0) - fmt.Fprintf(tw, "Segment\tTotalIn\tTotalOut\tRateIn\tRateOut\n") + _, _ = fmt.Fprintf(tw, "Segment\tTotalIn\tTotalOut\tRateIn\tRateOut\n") if bypeer { bw, err := api.NetBandwidthStatsByPeer(ctx) @@ -498,7 +498,15 @@ var NetBandwidthCmd = &cli.Command{ for _, p := range peers { s := bw[p] - fmt.Fprintf(tw, "%s\t%s\t%s\t%s/s\t%s/s\n", p, humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + _, _ = fmt.Fprintf( + tw, + "%s\t%s\t%s\t%s/s\t%s/s\n", + p, + humanize.Bytes(uint64(s.TotalIn)), + humanize.Bytes(uint64(s.TotalOut)), + humanize.Bytes(uint64(s.RateIn)), + humanize.Bytes(uint64(s.RateOut)), + ) } } else if byproto { bw, err := api.NetBandwidthStatsByProtocol(ctx) @@ -520,7 +528,15 @@ var NetBandwidthCmd = &cli.Command{ if p == "" { p = "" } - fmt.Fprintf(tw, "%s\t%s\t%s\t%s/s\t%s/s\n", p, humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + _, _ = fmt.Fprintf( + tw, + "%s\t%s\t%s\t%s/s\t%s/s\n", + p, + humanize.Bytes(uint64(s.TotalIn)), + humanize.Bytes(uint64(s.TotalOut)), + humanize.Bytes(uint64(s.RateIn)), + humanize.Bytes(uint64(s.RateOut)), + ) } } else { @@ -529,7 +545,14 @@ var NetBandwidthCmd = &cli.Command{ return err } - fmt.Fprintf(tw, "Total\t%s\t%s\t%s/s\t%s/s\n", humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut))) + _, _ = fmt.Fprintf( + tw, + "Total\t%s\t%s\t%s/s\t%s/s\n", + humanize.Bytes(uint64(s.TotalIn)), + humanize.Bytes(uint64(s.TotalOut)), + 
humanize.Bytes(uint64(s.RateIn)), + humanize.Bytes(uint64(s.RateOut)), + ) } return tw.Flush() @@ -562,7 +585,7 @@ var NetBlockAddPeer = &cli.Command{ Usage: "Block a peer", ArgsUsage: " ...", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -588,7 +611,7 @@ var NetBlockAddIP = &cli.Command{ Usage: "Block an IP address", ArgsUsage: " ...", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -604,7 +627,7 @@ var NetBlockAddSubnet = &cli.Command{ Usage: "Block an IP subnet", ArgsUsage: " ...", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -630,7 +653,7 @@ var NetBlockRemovePeer = &cli.Command{ Usage: "Unblock a peer", ArgsUsage: " ...", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -656,7 +679,7 @@ var NetBlockRemoveIP = &cli.Command{ Usage: "Unblock an IP address", ArgsUsage: " ...", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -672,7 +695,7 @@ var NetBlockRemoveSubnet = &cli.Command{ Usage: "Unblock an IP subnet", ArgsUsage: " ...", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -687,7 +710,7 @@ var NetBlockListCmd = &cli.Command{ Name: "list", Usage: "list connection gating rules", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -776,7 +799,7 @@ var NetStatCmd = &cli.Command{ - all -- reports the resource usage for all currently active scopes. 
`, Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -883,7 +906,7 @@ var NetLimitCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -928,7 +951,7 @@ var NetProtectAdd = &cli.Command{ Usage: "Add one or more peer IDs to the list of protected peer connections", ArgsUsage: " [...]", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -958,7 +981,7 @@ var NetProtectRemove = &cli.Command{ Usage: "Remove one or more peer IDs from the list of protected peer connections.", ArgsUsage: " [...]", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } @@ -1010,7 +1033,7 @@ var NetProtectList = &cli.Command{ Name: "list-protected", Usage: "List the peer IDs with protected connection.", Action: func(cctx *cli.Context) error { - api, closer, err := GetAPI(cctx) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } diff --git a/cli/paych.go b/cli/paych.go index 46b043d6a6d..a2f427fd5a5 100644 --- a/cli/paych.go +++ b/cli/paych.go @@ -39,11 +39,6 @@ var paychAddFundsCmd = &cli.Command{ Usage: "Add funds to the payment channel between fromAddress and toAddress. 
Creates the payment channel if it doesn't already exist.", ArgsUsage: "[fromAddress toAddress amount]", Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "restart-retrievals", - Usage: "restart stalled retrieval deals on this payment channel", - Value: true, - }, &cli.BoolFlag{ Name: "reserve", Usage: "mark funds as reserved", @@ -98,11 +93,7 @@ var paychAddFundsCmd = &cli.Command{ return err } - fmt.Fprintln(cctx.App.Writer, chAddr) - restartRetrievals := cctx.Bool("restart-retrievals") - if restartRetrievals { - return api.ClientRetrieveTryRestartInsufficientFunds(ctx, chAddr) - } + _, _ = fmt.Fprintln(cctx.App.Writer, chAddr) return nil }, } @@ -177,23 +168,23 @@ var paychStatusCmd = &cli.Command{ func paychStatus(writer io.Writer, avail *lapi.ChannelAvailableFunds) { if avail.Channel == nil { if avail.PendingWaitSentinel != nil { - fmt.Fprint(writer, "Creating channel\n") - fmt.Fprintf(writer, " From: %s\n", avail.From) - fmt.Fprintf(writer, " To: %s\n", avail.To) - fmt.Fprintf(writer, " Pending Amt: %s\n", types.FIL(avail.PendingAmt)) - fmt.Fprintf(writer, " Wait Sentinel: %s\n", avail.PendingWaitSentinel) + _, _ = fmt.Fprint(writer, "Creating channel\n") + _, _ = fmt.Fprintf(writer, " From: %s\n", avail.From) + _, _ = fmt.Fprintf(writer, " To: %s\n", avail.To) + _, _ = fmt.Fprintf(writer, " Pending Amt: %s\n", types.FIL(avail.PendingAmt)) + _, _ = fmt.Fprintf(writer, " Wait Sentinel: %s\n", avail.PendingWaitSentinel) return } - fmt.Fprint(writer, "Channel does not exist\n") - fmt.Fprintf(writer, " From: %s\n", avail.From) - fmt.Fprintf(writer, " To: %s\n", avail.To) + _, _ = fmt.Fprint(writer, "Channel does not exist\n") + _, _ = fmt.Fprintf(writer, " From: %s\n", avail.From) + _, _ = fmt.Fprintf(writer, " To: %s\n", avail.To) return } if avail.PendingWaitSentinel != nil { - fmt.Fprint(writer, "Adding Funds to channel\n") + _, _ = fmt.Fprint(writer, "Adding Funds to channel\n") } else { - fmt.Fprint(writer, "Channel exists\n") + _, _ = fmt.Fprint(writer, "Channel 
exists\n") } nameValues := [][]string{ @@ -213,7 +204,7 @@ func paychStatus(writer io.Writer, avail *lapi.ChannelAvailableFunds) { avail.PendingWaitSentinel.String(), }) } - fmt.Fprint(writer, formatNameValues(nameValues)) + _, _ = fmt.Fprint(writer, formatNameValues(nameValues)) } func formatNameValues(nameValues [][]string) string { @@ -249,7 +240,7 @@ var paychListCmd = &cli.Command{ } for _, v := range chs { - fmt.Fprintln(cctx.App.Writer, v.String()) + _, _ = fmt.Fprintln(cctx.App.Writer, v.String()) } return nil }, @@ -290,7 +281,7 @@ var paychSettleCmd = &cli.Command{ return fmt.Errorf("settle message execution failed (exit code %d)", mwait.Receipt.ExitCode) } - fmt.Fprintf(cctx.App.Writer, "Settled channel %s\n", ch) + _, _ = fmt.Fprintf(cctx.App.Writer, "Settled channel %s\n", ch) return nil }, } @@ -330,7 +321,7 @@ var paychCloseCmd = &cli.Command{ return fmt.Errorf("collect message execution failed (exit code %d)", mwait.Receipt.ExitCode) } - fmt.Fprintf(cctx.App.Writer, "Collected funds for channel %s\n", ch) + _, _ = fmt.Fprintf(cctx.App.Writer, "Collected funds for channel %s\n", ch) return nil }, } @@ -390,7 +381,7 @@ var paychVoucherCreateCmd = &cli.Command{ } if v.Voucher == nil { - return fmt.Errorf("Could not create voucher: insufficient funds in channel, shortfall: %d", v.Shortfall) + return fmt.Errorf("could not create voucher: insufficient funds in channel, shortfall: %d", v.Shortfall) } enc, err := EncodedString(v.Voucher) @@ -398,7 +389,7 @@ var paychVoucherCreateCmd = &cli.Command{ return err } - fmt.Fprintln(cctx.App.Writer, enc) + _, _ = fmt.Fprintln(cctx.App.Writer, enc) return nil }, } @@ -434,7 +425,7 @@ var paychVoucherCheckCmd = &cli.Command{ return err } - fmt.Fprintln(cctx.App.Writer, "voucher is valid") + _, _ = fmt.Fprintln(cctx.App.Writer, "voucher is valid") return nil }, } @@ -589,12 +580,16 @@ func outputVoucher(w io.Writer, v *paych.SignedVoucher, export bool) error { } } - fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, 
v.Nonce, types.FIL(v.Amount)) + if _, err := fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, v.Nonce, types.FIL(v.Amount)); err != nil { + return err + } if export { - fmt.Fprintf(w, "; %s", enc) + if _, err := fmt.Fprintf(w, "; %s", enc); err != nil { + return err + } } - fmt.Fprintln(w) - return nil + _, err := fmt.Fprintln(w) + return err } var paychVoucherSubmitCmd = &cli.Command{ @@ -638,7 +633,7 @@ var paychVoucherSubmitCmd = &cli.Command{ return fmt.Errorf("message execution failed (exit code %d)", mwait.Receipt.ExitCode) } - fmt.Fprintln(cctx.App.Writer, "channel updated successfully") + _, _ = fmt.Fprintln(cctx.App.Writer, "channel updated successfully") return nil }, diff --git a/cli/send.go b/cli/send.go index 89c79e109bd..a106e848037 100644 --- a/cli/send.go +++ b/cli/send.go @@ -217,7 +217,7 @@ var SendCmd = &cli.Command{ return err } - fmt.Fprintf(cctx.App.Writer, "%s\n", sm.Cid()) + _, _ = fmt.Fprintf(cctx.App.Writer, "%s\n", sm.Cid()) return nil }, } diff --git a/cli/sending_ui.go b/cli/sending_ui.go index c248feb3d3e..34eb256494b 100644 --- a/cli/sending_ui.go +++ b/cli/sending_ui.go @@ -28,7 +28,7 @@ func InteractiveSend(ctx context.Context, cctx *cli.Context, srv ServicesAPI, printer := cctx.App.Writer if xerrors.Is(err, ErrCheckFailed) { if !cctx.Bool("interactive") { - fmt.Fprintf(printer, "Following checks have failed:\n") + _, _ = fmt.Fprintf(printer, "Following checks have failed:\n") printChecks(printer, checks, proto.Message.Cid()) } else { proto, err = resolveChecks(ctx, srv, cctx.App.Writer, proto, checks) @@ -75,11 +75,11 @@ func resolveChecks(ctx context.Context, s ServicesAPI, printer io.Writer, proto *api.MessagePrototype, checkGroups [][]api.MessageCheckStatus, ) (*api.MessagePrototype, error) { - fmt.Fprintf(printer, "Following checks have failed:\n") + _, _ = fmt.Fprintf(printer, "Following checks have failed:\n") printChecks(printer, checkGroups, proto.Message.Cid()) if feeCapBad, baseFee := isFeeCapProblem(checkGroups, 
proto.Message.Cid()); feeCapBad { - fmt.Fprintf(printer, "Fee of the message can be adjusted\n") + _, _ = fmt.Fprintf(printer, "Fee of the message can be adjusted\n") if askUser(printer, "Do you wish to do that? [Yes/no]: ", true) { var err error proto, err = runFeeCapAdjustmentUI(proto, baseFee) @@ -91,7 +91,7 @@ func resolveChecks(ctx context.Context, s ServicesAPI, printer io.Writer, if err != nil { return nil, err } - fmt.Fprintf(printer, "Following checks still failed:\n") + _, _ = fmt.Fprintf(printer, "Following checks still failed:\n") printChecks(printer, checks, proto.Message.Cid()) } @@ -114,14 +114,14 @@ func printChecks(printer io.Writer, checkGroups [][]api.MessageCheckStatus, prot if !aboutProto { msgName = c.Cid.String() } - fmt.Fprintf(printer, "%s message failed a check %s: %s\n", msgName, c.Code, c.Err) + _, _ = fmt.Fprintf(printer, "%s message failed a check %s: %s\n", msgName, c.Code, c.Err) } } } func askUser(printer io.Writer, q string, def bool) bool { var resp string - fmt.Fprint(printer, q) + _, _ = fmt.Fprint(printer, q) _, _ = fmt.Scanln(&resp) resp = strings.ToLower(resp) if len(resp) == 0 { diff --git a/cli/spcli/actor.go b/cli/spcli/actor.go index 33590de50b6..d8d35543814 100644 --- a/cli/spcli/actor.go +++ b/cli/spcli/actor.go @@ -670,7 +670,7 @@ func ActorProposeChangeWorkerCmd(getActor ActorAddressGetter) *cli.Command { } if !cctx.Bool("really-do-it") { - fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + _, _ = fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") return nil } @@ -695,7 +695,7 @@ func ActorProposeChangeWorkerCmd(getActor ActorAddressGetter) *cli.Command { return xerrors.Errorf("mpool push: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, smsg.Cid(), 
build.MessageConfidence) @@ -716,8 +716,8 @@ func ActorProposeChangeWorkerCmd(getActor ActorAddressGetter) *cli.Command { return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) } - fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully sent, change happens at height %d.\n", na, mi.WorkerChangeEpoch) - fmt.Fprintf(cctx.App.Writer, "If you have no active deadlines, call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + _, _ = fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully sent, change happens at height %d.\n", na, mi.WorkerChangeEpoch) + _, _ = fmt.Fprintf(cctx.App.Writer, "If you have no active deadlines, call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) return nil }, @@ -942,7 +942,7 @@ func ActorConfirmChangeWorkerCmd(getActor ActorAddressGetter) *cli.Command { // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Worker change failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Worker change failed!") return err } @@ -1268,6 +1268,11 @@ var ActorNewMinerCmd = &cli.Command{ Name: "sector-size", Usage: "specify sector size to use for new miner initialisation", }, + &cli.IntFlag{ + Name: "confidence", + Usage: "number of block confirmations to wait for", + Value: int(build.MessageConfidence), + }, }, Action: func(cctx *cli.Context) error { ctx := cctx.Context diff --git a/cli/spcli/sectors.go b/cli/spcli/sectors.go index 95acbcd111e..1dad6ada264 100644 --- a/cli/spcli/sectors.go +++ b/cli/spcli/sectors.go @@ -159,6 +159,11 @@ func SectorsStatusCmd(getActorAddress ActorAddressGetter, getOnDiskInfo OnDiskIn return err } + if status == nil { + fmt.Println("Sector status not found on chain") + return nil + } + mid, err := address.IDFromAddress(maddr) if err != nil { return err @@ -261,12 +266,14 @@ func SectorsStatusCmd(getActorAddress 
ActorAddressGetter, getOnDiskInfo OnDiskIn } if cctx.Bool("log") { - fmt.Printf("--------\nEvent Log:\n") + if getOnDiskInfo != nil { + fmt.Printf("--------\nEvent Log:\n") - for i, l := range status.Log { - fmt.Printf("%d.\t%s:\t[%s]\t%s\n", i, time.Unix(int64(l.Timestamp), 0), l.Kind, l.Message) - if l.Trace != "" { - fmt.Printf("\t%s\n", l.Trace) + for i, l := range status.Log { + fmt.Printf("%d.\t%s:\t[%s]\t%s\n", i, time.Unix(int64(l.Timestamp), 0), l.Kind, l.Message) + if l.Trace != "" { + fmt.Printf("\t%s\n", l.Trace) + } } } } diff --git a/cli/util/api.go b/cli/util/api.go index 7940f67c63a..39077e98e1d 100644 --- a/cli/util/api.go +++ b/cli/util/api.go @@ -179,7 +179,7 @@ func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http. return heads[0].addr, heads[0].header, nil } -func GetCommonAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) { +func GetCommonAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) { ti, ok := ctx.App.Metadata["repoType"] if !ok { log.Errorf("unknown repo type, are you sure you want to use GetCommonAPI?") @@ -456,27 +456,6 @@ func GetWorkerAPI(ctx *cli.Context) (api.Worker, jsonrpc.ClientCloser, error) { return client.NewWorkerRPCV0(ctx.Context, addr, headers) } -func GetMarketsAPI(ctx *cli.Context) (api.StorageMiner, jsonrpc.ClientCloser, error) { - // to support lotus-miner cli tests. - if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { - return tn.(api.StorageMiner), func() {}, nil - } - - addr, headers, err := GetRawAPI(ctx, repo.Markets, "v0") - if err != nil { - return nil, nil, err - } - - if IsVeryVerbose { - _, _ = fmt.Fprintln(ctx.App.Writer, "using markets API v0 endpoint:", addr) - } - - // the markets node is a specialised miner's node, supporting only the - // markets API, which is a subset of the miner API. All non-markets - // operations will error out with "unsupported". 
- return client.NewStorageMinerRPCV0(ctx.Context, addr, headers) -} - func GetGatewayAPI(ctx *cli.Context) (api.Gateway, jsonrpc.ClientCloser, error) { addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v1") if err != nil { diff --git a/cli/util/retrieval.go b/cli/util/retrieval.go deleted file mode 100644 index ac34fcf3a48..00000000000 --- a/cli/util/retrieval.go +++ /dev/null @@ -1,77 +0,0 @@ -package cliutil - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path" - - "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" -) - -func ApiAddrToUrl(apiAddr string) (*url.URL, error) { - ma, err := multiaddr.NewMultiaddr(apiAddr) - if err == nil { - _, addr, err := manet.DialArgs(ma) - if err != nil { - return nil, err - } - // todo: make cliutil helpers for this - apiAddr = "http://" + addr - } - aa, err := url.Parse(apiAddr) - if err != nil { - return nil, xerrors.Errorf("parsing api address: %w", err) - } - switch aa.Scheme { - case "ws": - aa.Scheme = "http" - case "wss": - aa.Scheme = "https" - } - - return aa, nil -} - -func ClientExportStream(apiAddr string, apiAuth http.Header, eref api.ExportRef, car bool) (io.ReadCloser, error) { - rj, err := json.Marshal(eref) - if err != nil { - return nil, xerrors.Errorf("marshaling export ref: %w", err) - } - - aa, err := ApiAddrToUrl(apiAddr) - if err != nil { - return nil, err - } - - aa.Path = path.Join(aa.Path, "rest/v0/export") - req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil) - if err != nil { - return nil, err - } - - req.Header = apiAuth - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - em, err := io.ReadAll(resp.Body) - if err != nil { - return nil, xerrors.Errorf("reading error body: %w", err) - } - - resp.Body.Close() // nolint - return nil, 
xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em)) - } - - return resp.Body, nil -} diff --git a/cmd/curio/cli.go b/cmd/curio/cli.go deleted file mode 100644 index 6c9cb7ec67b..00000000000 --- a/cmd/curio/cli.go +++ /dev/null @@ -1,249 +0,0 @@ -package main - -import ( - "bufio" - "context" - "encoding/base64" - "errors" - "fmt" - "net" - "os" - "time" - - "github.com/BurntSushi/toml" - "github.com/gbrlsnchs/jwt/v3" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/rpc" -) - -const providerEnvVar = "CURIO_API_INFO" - -var cliCmd = &cli.Command{ - Name: "cli", - Usage: "Execute cli commands", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "machine", - Usage: "machine host:port (curio run --listen address)", - }, - }, - Before: func(cctx *cli.Context) error { - if os.Getenv(providerEnvVar) != "" { - // set already - return nil - } - if os.Getenv("LOTUS_DOCS_GENERATION") == "1" { - return nil - } - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - ctx := lcli.ReqContext(cctx) - - machine := cctx.String("machine") - if machine == "" { - // interactive picker - var machines []struct { - HostAndPort string `db:"host_and_port"` - LastContact time.Time `db:"last_contact"` - } - - err := db.Select(ctx, &machines, "select host_and_port, last_contact from harmony_machines") - if err != nil { - return xerrors.Errorf("getting machine list: %w", err) - } - - now := time.Now() - fmt.Println("Available machines:") - for i, m := range machines { - // A machine is healthy if contacted not longer than 2 minutes ago - healthStatus := "unhealthy" - if now.Sub(m.LastContact) <= 2*time.Minute { - healthStatus = "healthy" - } - fmt.Printf("%d. 
%s %s\n", i+1, m.HostAndPort, healthStatus) - } - - fmt.Print("Select: ") - reader := bufio.NewReader(os.Stdin) - input, err := reader.ReadString('\n') - if err != nil { - return xerrors.Errorf("reading selection: %w", err) - } - - var selection int - _, err = fmt.Sscanf(input, "%d", &selection) - if err != nil { - return xerrors.Errorf("parsing selection: %w", err) - } - - if selection < 1 || selection > len(machines) { - return xerrors.New("invalid selection") - } - - machine = machines[selection-1].HostAndPort - } - - var apiKeys []string - { - var dbconfigs []struct { - Config string `db:"config"` - Title string `db:"title"` - } - - err := db.Select(ctx, &dbconfigs, "select config from harmony_config") - if err != nil { - return xerrors.Errorf("getting configs: %w", err) - } - - var seen = make(map[string]struct{}) - - for _, config := range dbconfigs { - var layer struct { - Apis struct { - StorageRPCSecret string - } - } - - if _, err := toml.Decode(config.Config, &layer); err != nil { - return xerrors.Errorf("decode config layer %s: %w", config.Title, err) - } - - if layer.Apis.StorageRPCSecret != "" { - if _, ok := seen[layer.Apis.StorageRPCSecret]; ok { - continue - } - seen[layer.Apis.StorageRPCSecret] = struct{}{} - apiKeys = append(apiKeys, layer.Apis.StorageRPCSecret) - } - } - } - - if len(apiKeys) == 0 { - return xerrors.New("no api keys found in the database") - } - if len(apiKeys) > 1 { - return xerrors.Errorf("multiple api keys found in the database, not supported yet") - } - - var apiToken []byte - { - type jwtPayload struct { - Allow []auth.Permission - } - - p := jwtPayload{ - Allow: api.AllPermissions, - } - - sk, err := base64.StdEncoding.DecodeString(apiKeys[0]) - if err != nil { - return xerrors.Errorf("decode secret: %w", err) - } - - apiToken, err = jwt.Sign(&p, jwt.NewHS256(sk)) - if err != nil { - return xerrors.Errorf("signing token: %w", err) - } - } - - { - - laddr, err := net.ResolveTCPAddr("tcp", machine) - if err != nil { - return 
xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 { - // set localhost - laddr.IP = net.IPv4(127, 0, 0, 1) - } - - ma, err := manet.FromNetAddr(laddr) - if err != nil { - return xerrors.Errorf("net from addr (%v): %w", laddr, err) - } - - token := fmt.Sprintf("%s:%s", string(apiToken), ma) - if err := os.Setenv(providerEnvVar, token); err != nil { - return xerrors.Errorf("setting env var: %w", err) - } - } - - { - api, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - - v, err := api.Version(ctx) - if err != nil { - return xerrors.Errorf("querying version: %w", err) - } - - fmt.Println("remote node version:", v.String()) - } - - return nil - }, - Subcommands: []*cli.Command{ - storageCmd, - logCmd, - waitApiCmd, - }, -} - -var waitApiCmd = &cli.Command{ - Name: "wait-api", - Usage: "Wait for Curio api to come online", - Flags: []cli.Flag{ - &cli.DurationFlag{ - Name: "timeout", - Usage: "duration to wait till fail", - Value: time.Second * 30, - }, - }, - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - ctx, cancel := context.WithTimeout(ctx, cctx.Duration("timeout")) - defer cancel() - for { - if ctx.Err() != nil { - break - } - - api, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - fmt.Printf("Not online yet... 
(%s)\n", err) - time.Sleep(time.Second) - continue - } - defer closer() - - _, err = api.Version(ctx) - if err != nil { - return err - } - - return nil - } - - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - return fmt.Errorf("timed out waiting for api to come online") - } - - return ctx.Err() - }, -} diff --git a/cmd/curio/config.go b/cmd/curio/config.go deleted file mode 100644 index 16b7d89c378..00000000000 --- a/cmd/curio/config.go +++ /dev/null @@ -1,440 +0,0 @@ -package main - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path" - "strings" - - "github.com/BurntSushi/toml" - "github.com/fatih/color" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" -) - -var configCmd = &cli.Command{ - Name: "config", - Usage: "Manage node config by layers. The layer 'base' will always be applied at Curio start-up.", - Subcommands: []*cli.Command{ - configDefaultCmd, - configSetCmd, - configGetCmd, - configListCmd, - configViewCmd, - configRmCmd, - configEditCmd, - configNewCmd, - }, -} - -var configDefaultCmd = &cli.Command{ - Name: "default", - Aliases: []string{"defaults"}, - Usage: "Print default node config", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "no-comment", - Usage: "don't comment default values", - }, - }, - Action: func(cctx *cli.Context) error { - comment := !cctx.Bool("no-comment") - cfg, err := deps.GetDefaultConfig(comment) - if err != nil { - return err - } - fmt.Print(cfg) - - return nil - }, -} - -var configSetCmd = &cli.Command{ - Name: "set", - Aliases: []string{"add", "update", "create"}, - Usage: "Set a config layer or the base by providing a filename or stdin.", - ArgsUsage: "a layer's file name", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "title", - Usage: "title of the config layer (req'd for stdin)", - }, - }, - Action: func(cctx *cli.Context) 
error { - args := cctx.Args() - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - name := cctx.String("title") - var stream io.Reader = os.Stdin - if args.Len() != 1 { - if cctx.String("title") == "" { - return errors.New("must have a title for stdin, or a file name") - } - } else { - stream, err = os.Open(args.First()) - if err != nil { - return fmt.Errorf("cannot open file %s: %w", args.First(), err) - } - if name == "" { - name = strings.Split(path.Base(args.First()), ".")[0] - } - } - bytes, err := io.ReadAll(stream) - if err != nil { - return fmt.Errorf("cannot read stream/file %w", err) - } - - curioConfig := config.DefaultCurioConfig() // ensure it's toml - _, err = deps.LoadConfigWithUpgrades(string(bytes), curioConfig) - if err != nil { - return fmt.Errorf("cannot decode file: %w", err) - } - _ = curioConfig - - err = setConfig(db, name, string(bytes)) - - if err != nil { - return fmt.Errorf("unable to save config layer: %w", err) - } - - fmt.Println("Layer " + name + " created/updated") - return nil - }, -} - -func setConfig(db *harmonydb.DB, name, config string) error { - _, err := db.Exec(context.Background(), - `INSERT INTO harmony_config (title, config) VALUES ($1, $2) - ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, config) - return err -} - -var configGetCmd = &cli.Command{ - Name: "get", - Aliases: []string{"cat", "show"}, - Usage: "Get a config layer by name. 
You may want to pipe the output to a file, or use 'less'", - ArgsUsage: "layer name", - Action: func(cctx *cli.Context) error { - args := cctx.Args() - if args.Len() != 1 { - return fmt.Errorf("want 1 layer arg, got %d", args.Len()) - } - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - cfg, err := getConfig(db, args.First()) - if err != nil { - return err - } - fmt.Println(cfg) - - return nil - }, -} - -func getConfig(db *harmonydb.DB, layer string) (string, error) { - var cfg string - err := db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&cfg) - if err != nil { - return "", err - } - return cfg, nil -} - -var configListCmd = &cli.Command{ - Name: "list", - Aliases: []string{"ls"}, - Usage: "List config layers present in the DB.", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - var res []string - err = db.Select(context.Background(), &res, `SELECT title FROM harmony_config ORDER BY title`) - if err != nil { - return fmt.Errorf("unable to read from db: %w", err) - } - for _, r := range res { - fmt.Println(r) - } - - return nil - }, -} - -var configRmCmd = &cli.Command{ - Name: "remove", - Aliases: []string{"rm", "del", "delete"}, - Usage: "Remove a named config layer.", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - args := cctx.Args() - if args.Len() != 1 { - return errors.New("must have exactly 1 arg for the layer name") - } - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - ct, err := db.Exec(context.Background(), `DELETE FROM harmony_config WHERE title=$1`, args.First()) - if err != nil { - return fmt.Errorf("unable to read from db: %w", err) - } - if ct == 0 { - return fmt.Errorf("no layer named %s", args.First()) - } - - return nil - }, -} -var configViewCmd = &cli.Command{ - Name: "interpret", - Aliases: []string{"view", "stacked", "stack"}, - Usage: "Interpret stacked 
config layers by this version of curio, with system-generated comments.", - ArgsUsage: "a list of layers to be interpreted as the final config", - Flags: []cli.Flag{ - &cli.StringSliceFlag{ - Name: "layers", - Usage: "comma or space separated list of layers to be interpreted (base is always applied)", - Required: true, - }, - }, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - curioConfig, err := deps.GetConfig(cctx, db) - if err != nil { - return err - } - cb, err := config.ConfigUpdate(curioConfig, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("cannot interpret config: %w", err) - } - fmt.Println(string(cb)) - return nil - }, -} - -var configEditCmd = &cli.Command{ - Name: "edit", - Usage: "edit a config layer", - ArgsUsage: "[layer name]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "editor", - Usage: "editor to use", - Value: "vim", - EnvVars: []string{"EDITOR"}, - }, - &cli.StringFlag{ - Name: "source", - Usage: "source config layer", - DefaultText: "", - }, - &cli.BoolFlag{ - Name: "allow-overwrite", - Usage: "allow overwrite of existing layer if source is a different layer", - }, - &cli.BoolFlag{ - Name: "no-source-diff", - Usage: "save the whole config into the layer, not just the diff", - }, - &cli.BoolFlag{ - Name: "no-interpret-source", - Usage: "do not interpret source layer", - DefaultText: "true if --source is set", - }, - }, - Action: func(cctx *cli.Context) error { - layer := cctx.Args().First() - if layer == "" { - return errors.New("layer name is required") - } - - source := layer - if cctx.IsSet("source") { - source = cctx.String("source") - - if source == layer && !cctx.Bool("allow-owerwrite") { - return errors.New("source and target layers are the same") - } - } - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - sourceConfig, err := getConfig(db, source) - if err 
!= nil { - return xerrors.Errorf("getting source config: %w", err) - } - - if cctx.IsSet("source") && source != layer && !cctx.Bool("no-interpret-source") { - lp := config.DefaultCurioConfig() - if _, err := toml.Decode(sourceConfig, lp); err != nil { - return xerrors.Errorf("parsing source config: %w", err) - } - - cb, err := config.ConfigUpdate(lp, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("interpreting source config: %w", err) - } - sourceConfig = string(cb) - } - - editor := cctx.String("editor") - newConfig, err := edit(editor, sourceConfig) - if err != nil { - return xerrors.Errorf("editing config: %w", err) - } - - toWrite := newConfig - - if cctx.IsSet("source") && !cctx.Bool("no-source-diff") { - updated, err := diff(sourceConfig, newConfig) - if err != nil { - return xerrors.Errorf("computing diff: %w", err) - } - - { - fmt.Printf("%s will write changes as the layer because %s is not set\n", color.YellowString(">"), color.GreenString("--no-source-diff")) - fmt.Println(updated) - fmt.Printf("%s Confirm [y]: ", color.YellowString(">")) - - for { - var confirmBuf [16]byte - n, err := os.Stdin.Read(confirmBuf[:]) - if err != nil { - return xerrors.Errorf("reading confirmation: %w", err) - } - confirm := strings.TrimSpace(string(confirmBuf[:n])) - - if confirm == "" { - confirm = "y" - } - - if confirm[:1] == "y" { - break - } - if confirm[:1] == "n" { - return nil - } - - fmt.Printf("%s Confirm [y]:\n", color.YellowString(">")) - } - } - - toWrite = updated - } - - fmt.Printf("%s Writing config for layer %s\n", color.YellowString(">"), color.GreenString(layer)) - - return setConfig(db, layer, toWrite) - }, -} - -func diff(sourceConf, newConf string) (string, error) { - lpSrc := config.DefaultCurioConfig() - lpNew := config.DefaultCurioConfig() - - _, err := toml.Decode(sourceConf, lpSrc) - if err != nil { - return "", xerrors.Errorf("decoding source config: 
%w", err) - } - - _, err = toml.Decode(newConf, lpNew) - if err != nil { - return "", xerrors.Errorf("decoding new config: %w", err) - } - - cb, err := config.ConfigUpdate(lpNew, lpSrc, config.Commented(true), config.NoEnv()) - if err != nil { - return "", xerrors.Errorf("interpreting source config: %w", err) - } - - lines := strings.Split(string(cb), "\n") - var outLines []string - var categoryBuf string - - for _, line := range lines { - // drop empty lines - if strings.TrimSpace(line) == "" { - continue - } - // drop lines starting with '#' - if strings.HasPrefix(strings.TrimSpace(line), "#") { - continue - } - // if starting with [, it's a category - if strings.HasPrefix(strings.TrimSpace(line), "[") { - categoryBuf = line - continue - } - - if categoryBuf != "" { - outLines = append(outLines, categoryBuf) - categoryBuf = "" - } - - outLines = append(outLines, line) - } - - return strings.Join(outLines, "\n"), nil -} - -func edit(editor, cfg string) (string, error) { - file, err := os.CreateTemp("", "curio-config-*.toml") - if err != nil { - return "", err - } - - _, err = file.WriteString(cfg) - if err != nil { - return "", err - } - - filePath := file.Name() - - if err := file.Close(); err != nil { - return "", err - } - - defer func() { - _ = os.Remove(filePath) - }() - - cmd := exec.Command(editor, filePath) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - return "", err - } - - data, err := os.ReadFile(filePath) - if err != nil { - return "", err - } - - return string(data), err -} diff --git a/cmd/curio/config_new.go b/cmd/curio/config_new.go deleted file mode 100644 index 65549bd6995..00000000000 --- a/cmd/curio/config_new.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" 
- "github.com/filecoin-project/lotus/node/repo" -) - -var configNewCmd = &cli.Command{ - Name: "new-cluster", - Usage: "Create new configuration for a new cluster", - ArgsUsage: "[SP actor address...]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - EnvVars: []string{"LOTUS_PATH"}, - Hidden: true, - Value: "~/.lotus", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.Args().Len() < 1 { - return xerrors.New("must specify at least one SP actor address. Use 'lotus-shed miner create' or use 'curio guided-setup'") - } - - ctx := cctx.Context - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - full, closer, err := cliutil.GetFullNodeAPIV1(cctx) - if err != nil { - return xerrors.Errorf("connecting to full node: %w", err) - } - defer closer() - - ainfo, err := cliutil.GetAPIInfo(cctx, repo.FullNode) - if err != nil { - return xerrors.Errorf("could not get API info for FullNode: %w", err) - } - - token, err := full.AuthNew(ctx, api.AllPermissions) - if err != nil { - return err - } - - return deps.CreateMinerConfig(ctx, full, db, cctx.Args().Slice(), fmt.Sprintf("%s:%s", string(token), ainfo.Addr)) - }, -} diff --git a/cmd/curio/config_test.go b/cmd/curio/config_test.go deleted file mode 100644 index 8043017d5ea..00000000000 --- a/cmd/curio/config_test.go +++ /dev/null @@ -1,438 +0,0 @@ -package main - -import ( - "reflect" - "testing" - "time" - - "github.com/invopop/jsonschema" - "github.com/samber/lo" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/node/config" -) - -var baseText = ` -[Subsystems] - # EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster - # with WindowPoSt enabled will also participate in the window post scheduler. 
It is possible to have multiple - # machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline, - # will allow for parallel processing of partitions. - # - # It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without - # the need for additional machines. In setups like this it is generally recommended to run - # partitionsPerDeadline+1 machines. - # - # type: bool - #EnableWindowPost = false - - # type: int - #WindowPostMaxTasks = 0 - - # EnableWinningPost enables winning post to be executed on this curio instance. - # Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler. - # It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost - # documentation. - # - # type: bool - #EnableWinningPost = false - - # type: int - #WinningPostMaxTasks = 0 - - # EnableParkPiece enables the "piece parking" task to run on this node. This task is responsible for fetching - # pieces from the network and storing them in the storage subsystem until sectors are sealed. This task is - # only applicable when integrating with boost, and should be enabled on nodes which will hold deal data - # from boost until sectors containing the related pieces have the TreeD/TreeR constructed. - # Note that future Curio implementations will have a separate task type for fetching pieces from the internet. - # - # type: bool - #EnableParkPiece = false - - # type: int - #ParkPieceMaxTasks = 0 - - # EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation - # creating 11 layer files in sector cache directory. - # - # SDR is the first task in the sealing pipeline. It's inputs are just the hash of the - # unsealed data (CommD), sector number, miner id, and the seal proof type. - # It's outputs are the 11 layer files in the sector cache directory. 
- # - # In lotus-miner this was run as part of PreCommit1. - # - # type: bool - #EnableSealSDR = false - - # The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #SealSDRMaxTasks = 0 - - # EnableSealSDRTrees enables the SDR pipeline tree-building task to run. - # This task handles encoding of unsealed data into last sdr layer and building - # of TreeR, TreeC and TreeD. - # - # This task runs after SDR - # TreeD is first computed with optional input of unsealed data - # TreeR is computed from replica, which is first computed as field - # addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data) - # TreeC is computed from the 11 SDR layers - # The 3 trees will later be used to compute the PoRep proof. - # - # In case of SyntheticPoRep challenges for PoRep will be pre-generated at this step, and trees and layers - # will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk) - # then using a small subset of them for the actual PoRep computation. This allows for significant scratch space - # saving between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step) - # - # In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1). - # Note that nodes with SDRTrees enabled will also answer to Finalize tasks, - # which just remove unneeded tree data after PoRep is computed. - # - # type: bool - #EnableSealSDRTrees = false - - # The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #SealSDRTreesMaxTasks = 0 - - # FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously. 
- # The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever - # machine holds sector cache files, as it removes unneeded tree data after PoRep is computed. - # Finalize will run in parallel with the SubmitCommitMsg task. - # - # type: int - #FinalizeMaxTasks = 0 - - # EnableSendPrecommitMsg enables the sending of precommit messages to the chain - # from this curio instance. - # This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message - # - # type: bool - #EnableSendPrecommitMsg = false - - # EnablePoRepProof enables the computation of the porep proof - # - # This task runs after interactive-porep seed becomes available, which happens 150 epochs (75min) after the - # precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are - # requested from the machine which holds sector cache files which most likely is the machine which ran the SDRTrees - # task. - # - # In lotus-miner this was Commit1 / Commit2 - # - # type: bool - #EnablePoRepProof = false - - # The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #PoRepProofMaxTasks = 0 - - # EnableSendCommitMsg enables the sending of commit messages to the chain - # from this curio instance. - # - # type: bool - #EnableSendCommitMsg = false - - # EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance. - # This tasks should only be enabled on nodes with long-term storage. - # - # The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the - # SDRTrees machine into long-term storage. This task runs after the Finalize task. - # - # type: bool - #EnableMoveStorage = false - - # The maximum amount of MoveStorage tasks that can run simultaneously. 
Note that the maximum number of tasks will - # also be bounded by resources available on the machine. It is recommended that this value is set to a number which - # uses all available network (or disk) bandwidth on the machine without causing bottlenecks. - # - # type: int - #MoveStorageMaxTasks = 0 - - # EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should - # only need to be run on a single machine in the cluster. - # - # type: bool - #EnableWebGui = false - - # The address that should listen for Web GUI requests. - # - # type: string - #GuiAddress = ":4701" - - -[Fees] - # type: types.FIL - #DefaultMaxFee = "0.07 FIL" - - # type: types.FIL - #MaxPreCommitGasFee = "0.025 FIL" - - # type: types.FIL - #MaxCommitGasFee = "0.05 FIL" - - # type: types.FIL - #MaxTerminateGasFee = "0.5 FIL" - - # WindowPoSt is a high-value operation, so the default fee should be high. - # - # type: types.FIL - #MaxWindowPoStGasFee = "5 FIL" - - # type: types.FIL - #MaxPublishDealsFee = "0.05 FIL" - - [Fees.MaxPreCommitBatchGasFee] - # type: types.FIL - #Base = "0 FIL" - - # type: types.FIL - #PerSector = "0.02 FIL" - - [Fees.MaxCommitBatchGasFee] - # type: types.FIL - #Base = "0 FIL" - - # type: types.FIL - #PerSector = "0.03 FIL" - -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - MinerAddresses = ["t01013"] - - -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - #MinerAddresses = [] - - -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - MinerAddresses = ["t01006"] - - -[Proving] - # Maximum number of sector checks to run in parallel. 
(0 = unlimited) - # - # WARNING: Setting this value too high may make the node crash by running out of stack - # WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due - # to late submission. - # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: int - #ParallelCheckLimit = 32 - - # Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped - # - # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the - # test challenge took longer than this timeout - # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are - # blocked (e.g. in case of disconnected NFS mount) - # - # type: Duration - #SingleCheckTimeout = "10m0s" - - # Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in - # the partition which didn't get checked on time will be skipped - # - # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the - # test challenge took longer than this timeout - # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are - # blocked or slow - # - # type: Duration - #PartitionCheckTimeout = "20m0s" - - # Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present. - # - # WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need - # to be recovered. Before enabling this option, make sure your PoSt workers work correctly. 
- # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: bool - #DisableBuiltinWindowPoSt = false - - # Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present. - # - # WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards. - # Before enabling this option, make sure your PoSt workers work correctly. - # - # type: bool - #DisableBuiltinWinningPoSt = false - - # Disable WindowPoSt provable sector readability checks. - # - # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges - # from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as - # we're only interested in checking that sector data can be read. - # - # When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process - # can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by - # the builtin logic not skipping snark computation when some sectors need to be skipped. - # - # When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and - # if challenges for some sectors aren't readable, those sectors will just get skipped. - # - # Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter - # time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should - # be negligible. - # - # NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers. 
- # - # NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is - # sent to the chain - # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: bool - #DisableWDPoStPreChecks = false - - # Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (3 in nv21) - # - # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors. - # // - # Note that setting this value lower may result in less efficient gas use - more messages will be sent, - # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) - # - # Setting this value above the network limit has no effect - # - # type: int - #MaxPartitionsPerPoStMessage = 0 - - # In some cases when submitting DeclareFaultsRecovered messages, - # there may be too many recoveries to fit in a BlockGasLimit. - # In those cases it may be necessary to set this value to something low (eg 1); - # Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed, - # resulting in more total gas use (but each message will have lower gas limit) - # - # type: int - #MaxPartitionsPerRecoveryMessage = 0 - - # Enable single partition per PoSt Message for partitions containing recovery sectors - # - # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be - # too high to fit in the block gas limit. 
In those cases, it becomes useful to only house the single partition - # with recovering sectors in the post message - # - # Note that setting this value lower may result in less efficient gas use - more messages will be sent, - # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) - # - # type: bool - #SingleRecoveringPartitionPerPostMessage = false - - -[Journal] - # Events of the form: "system1:event1,system1:event2[,...]" - # - # type: string - #DisabledEvents = "" - - -[Apis] - # ChainApiInfo is the API endpoint for the Lotus daemon. - # - # type: []string - ChainApiInfo = ["eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.T_jmG4DTs9Zjd7rr78862lT7D2U63uz-zqcUKHwcqaU:/dns/localhost/tcp/1234/http"] - - # RPC Secret for the storage subsystem. - # If integrating with lotus-miner this must match the value from - # cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey - # - # type: string - StorageRPCSecret = "HxHe8YLHiY0LjHVw/WT/4XQkPGgRyCEYk+xiFi0Ob0o=" - -` - -func TestConfig(t *testing.T) { - baseCfg := config.DefaultCurioConfig() - - addr1 := config.CurioAddresses{ - PreCommitControl: []string{}, - CommitControl: []string{}, - TerminateControl: []string{"t3qroiebizgkz7pvj26reg5r5mqiftrt5hjdske2jzjmlacqr2qj7ytjncreih2mvujxoypwpfusmwpipvxncq"}, - DisableOwnerFallback: false, - DisableWorkerFallback: false, - MinerAddresses: []string{"t01000"}, - } - - addr2 := config.CurioAddresses{ - MinerAddresses: []string{"t01001"}, - } - - _, err := deps.LoadConfigWithUpgrades(baseText, baseCfg) - require.NoError(t, err) - - baseCfg.Addresses = append(baseCfg.Addresses, addr1) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - _, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - 
require.NoError(t, err) - - baseCfg.Addresses = append(baseCfg.Addresses, addr2) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - _, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - require.NoError(t, err) - -} - -func TestCustomConfigDurationJson(t *testing.T) { - ref := new(jsonschema.Reflector) - ref.Mapper = func(i reflect.Type) *jsonschema.Schema { - if i == reflect.TypeOf(config.Duration(time.Second)) { - return &jsonschema.Schema{ - Type: "string", - Format: "duration", - } - } - return nil - } - - sch := ref.Reflect(config.CurioConfig{}) - definitions := sch.Definitions["CurioProvingConfig"] - prop, ok := definitions.Properties.Get("SingleCheckTimeout") - require.True(t, ok) - require.Equal(t, prop.Type, "string") -} diff --git a/cmd/curio/deps/apiinfo.go b/cmd/curio/deps/apiinfo.go deleted file mode 100644 index 0dd96d81735..00000000000 --- a/cmd/curio/deps/apiinfo.go +++ /dev/null @@ -1,94 +0,0 @@ -package deps - -import ( - "fmt" - "net/http" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-jsonrpc" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/v1api" - cliutil "github.com/filecoin-project/lotus/cli/util" -) - -func getFullNodeAPIV1Curio(ctx *cli.Context, ainfoCfg []string, opts ...cliutil.GetFullNodeOption) (v1api.FullNode, jsonrpc.ClientCloser, error) { - if tn, ok := ctx.App.Metadata["testnode-full"]; ok { - return tn.(v1api.FullNode), func() {}, nil - } - - var options cliutil.GetFullNodeOptions - for _, opt := range opts { - opt(&options) - } - - var rpcOpts []jsonrpc.Option - if options.EthSubHandler != nil { - rpcOpts = append(rpcOpts, jsonrpc.WithClientHandler("Filecoin", options.EthSubHandler), 
jsonrpc.WithClientHandlerAlias("eth_subscription", "Filecoin.EthSubscription")) - } - - var httpHeads []httpHead - version := "v1" - { - if len(ainfoCfg) == 0 { - return nil, nil, xerrors.Errorf("could not get API info: none configured. \nConsider getting base.toml with './curio config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './curio config set /tmp/base.toml'") - } - for _, i := range ainfoCfg { - ainfo := cliutil.ParseApiInfo(i) - addr, err := ainfo.DialArgs(version) - if err != nil { - return nil, nil, xerrors.Errorf("could not get DialArgs: %w", err) - } - httpHeads = append(httpHeads, httpHead{addr: addr, header: ainfo.AuthHeader()}) - } - } - - if cliutil.IsVeryVerbose { - _, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", httpHeads[0].addr) - } - - var fullNodes []api.FullNode - var closers []jsonrpc.ClientCloser - - for _, head := range httpHeads { - v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header, rpcOpts...) 
- if err != nil { - log.Warnf("Not able to establish connection to node with addr: %s, Reason: %s", head.addr, err.Error()) - continue - } - fullNodes = append(fullNodes, v1api) - closers = append(closers, closer) - } - - // When running in cluster mode and trying to establish connections to multiple nodes, fail - // if less than 2 lotus nodes are actually running - if len(httpHeads) > 1 && len(fullNodes) < 2 { - return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node") - } - - finalCloser := func() { - for _, c := range closers { - c() - } - } - - var v1API api.FullNodeStruct - cliutil.FullNodeProxy(fullNodes, &v1API) - - v, err := v1API.Version(ctx.Context) - if err != nil { - return nil, nil, err - } - if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) { - return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion) - } - return &v1API, finalCloser, nil -} - -type httpHead struct { - addr string - header http.Header -} diff --git a/cmd/curio/deps/deps.go b/cmd/curio/deps/deps.go deleted file mode 100644 index 2050a1cafba..00000000000 --- a/cmd/curio/deps/deps.go +++ /dev/null @@ -1,537 +0,0 @@ -// Package deps provides the dependencies for the curio node. 
-package deps - -import ( - "context" - "crypto/rand" - "database/sql" - "encoding/base64" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/BurntSushi/toml" - "github.com/gbrlsnchs/jwt/v3" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-statestore" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/types" - curio "github.com/filecoin-project/lotus/curiosrc" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/journal/alerting" - "github.com/filecoin-project/lotus/journal/fsjournal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("curio/deps") - -func MakeDB(cctx *cli.Context) (*harmonydb.DB, error) { - // #1 CLI opts - fromCLI := func() (*harmonydb.DB, error) { - dbConfig := config.HarmonyDB{ - Username: cctx.String("db-user"), - Password: cctx.String("db-password"), - Hosts: strings.Split(cctx.String("db-host"), ","), - Database: cctx.String("db-name"), - Port: cctx.String("db-port"), - } - return harmonydb.NewFromConfig(dbConfig) - } - 
- readToml := func(path string) (*harmonydb.DB, error) { - cfg, err := config.FromFile(path) - if err != nil { - return nil, err - } - if c, ok := cfg.(*config.StorageMiner); ok { - return harmonydb.NewFromConfig(c.HarmonyDB) - } - return nil, errors.New("not a miner config") - } - - // #2 Try local miner config - fromMinerEnv := func() (*harmonydb.DB, error) { - v := os.Getenv("LOTUS_MINER_PATH") - if v == "" { - return nil, errors.New("no miner env") - } - return readToml(filepath.Join(v, "config.toml")) - - } - - fromMiner := func() (*harmonydb.DB, error) { - u, err := os.UserHomeDir() - if err != nil { - return nil, err - } - return readToml(filepath.Join(u, ".lotusminer/config.toml")) - } - fromEnv := func() (*harmonydb.DB, error) { - // #3 Try env - u, err := url.Parse(os.Getenv("CURIO_DB")) - if err != nil { - return nil, errors.New("no db connection string found in CURIO_DB env") - } - cfg := config.DefaultStorageMiner().HarmonyDB - if u.User.Username() != "" { - cfg.Username = u.User.Username() - } - if p, ok := u.User.Password(); ok && p != "" { - cfg.Password = p - } - if u.Hostname() != "" { - cfg.Hosts = []string{u.Hostname()} - } - if u.Port() != "" { - cfg.Port = u.Port() - } - if strings.TrimPrefix(u.Path, "/") != "" { - cfg.Database = strings.TrimPrefix(u.Path, "/") - } - - return harmonydb.NewFromConfig(cfg) - } - - for _, f := range []func() (*harmonydb.DB, error){fromCLI, fromMinerEnv, fromMiner, fromEnv} { - db, err := f() - if err != nil { - continue - } - return db, nil - } - log.Error("No db connection string found. User CLI args or env var: set CURIO_DB=postgres://USER:PASSWORD@HOST:PORT/DATABASE") - return fromCLI() //in-case it's not about bad config. 
-} - -type JwtPayload struct { - Allow []auth.Permission -} - -func StorageAuth(apiKey string) (sealer.StorageAuth, error) { - if apiKey == "" { - return nil, xerrors.Errorf("no api key provided") - } - - rawKey, err := base64.StdEncoding.DecodeString(apiKey) - if err != nil { - return nil, xerrors.Errorf("decoding api key: %w", err) - } - - key := jwt.NewHS256(rawKey) - - p := JwtPayload{ - Allow: []auth.Permission{"admin"}, - } - - token, err := jwt.Sign(&p, key) - if err != nil { - return nil, err - } - - headers := http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - return sealer.StorageAuth(headers), nil -} - -func GetDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) { - var deps Deps - return &deps, deps.PopulateRemainingDeps(ctx, cctx, true) -} - -type Deps struct { - Layers []string - Cfg *config.CurioConfig // values - DB *harmonydb.DB // has itest capability - Full api.FullNode - Verif storiface.Verifier - LW *sealer.LocalWorker - As *multictladdr.MultiAddressSelector - Maddrs map[dtypes.MinerAddress]bool - ProofTypes map[abi.RegisteredSealProof]bool - Stor *paths.Remote - Si *paths.DBIndex - LocalStore *paths.Local - LocalPaths *paths.BasicLocalStorage - ListenAddr string -} - -const ( - FlagRepoPath = "repo-path" -) - -func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context, makeRepo bool) error { - var err error - if makeRepo { - // Open repo - repoPath := cctx.String(FlagRepoPath) - fmt.Println("repopath", repoPath) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } - - ok, err := r.Exists() - if err != nil { - return err - } - if !ok { - if err := r.Init(repo.Curio); err != nil { - return err - } - } - } - - if deps.Cfg == nil { - deps.DB, err = MakeDB(cctx) - if err != nil { - return err - } - } - if deps.Layers == nil { - deps.Layers = append([]string{"base"}, cctx.StringSlice("layers")...) 
// Always stack on top of "base" layer - } - - if deps.Cfg == nil { - // The config feeds into task runners & their helpers - deps.Cfg, err = GetConfig(cctx, deps.DB) - if err != nil { - return xerrors.Errorf("populate config: %w", err) - } - } - - log.Debugw("config", "config", deps.Cfg) - - if deps.Verif == nil { - deps.Verif = ffiwrapper.ProofVerifier - } - - if deps.As == nil { - deps.As, err = curio.AddressSelector(deps.Cfg.Addresses)() - if err != nil { - return err - } - } - - if deps.Si == nil { - de, err := journal.ParseDisabledEvents(deps.Cfg.Journal.DisabledEvents) - if err != nil { - return err - } - j, err := fsjournal.OpenFSJournalPath(cctx.String("journal"), de) - if err != nil { - return err - } - go func() { - <-ctx.Done() - _ = j.Close() - }() - - al := alerting.NewAlertingSystem(j) - deps.Si = paths.NewDBIndex(al, deps.DB) - } - - if deps.Full == nil { - var fullCloser func() - cfgApiInfo := deps.Cfg.Apis.ChainApiInfo - if v := os.Getenv("FULLNODE_API_INFO"); v != "" { - cfgApiInfo = []string{v} - } - deps.Full, fullCloser, err = getFullNodeAPIV1Curio(cctx, cfgApiInfo) - if err != nil { - return err - } - - go func() { - <-ctx.Done() - fullCloser() - }() - } - - deps.LocalPaths = &paths.BasicLocalStorage{ - PathToJSON: cctx.String("storage-json"), - } - - if deps.ListenAddr == "" { - listenAddr := cctx.String("listen") - const unspecifiedAddress = "0.0.0.0" - addressSlice := strings.Split(listenAddr, ":") - if ip := net.ParseIP(addressSlice[0]); ip != nil { - if ip.String() == unspecifiedAddress { - rip, err := deps.DB.GetRoutableIP() - if err != nil { - return err - } - deps.ListenAddr = rip + ":" + addressSlice[1] - } - } - } - if deps.LocalStore == nil { - deps.LocalStore, err = paths.NewLocal(ctx, deps.LocalPaths, deps.Si, []string{"http://" + deps.ListenAddr + "/remote"}) - if err != nil { - return err - } - } - - sa, err := StorageAuth(deps.Cfg.Apis.StorageRPCSecret) - if err != nil { - return xerrors.Errorf(`'%w' while parsing the config 
toml's - [Apis] - StorageRPCSecret=%v -Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, err, deps.Cfg.Apis.StorageRPCSecret) - } - if deps.Stor == nil { - deps.Stor = paths.NewRemote(deps.LocalStore, deps.Si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{}) - } - if deps.LW == nil { - wstates := statestore.New(dssync.MutexWrap(ds.NewMapDatastore())) - - // todo localWorker isn't the abstraction layer we want to use here, we probably want to go straight to ffiwrapper - // maybe with a curio specific abstraction. LocalWorker does persistent call tracking which we probably - // don't need (ehh.. maybe we do, the async callback system may actually work decently well with harmonytask) - deps.LW = sealer.NewLocalWorker(sealer.WorkerConfig{ - MaxParallelChallengeReads: deps.Cfg.Proving.ParallelCheckLimit, - }, deps.Stor, deps.LocalStore, deps.Si, nil, wstates) - } - if deps.Maddrs == nil { - deps.Maddrs = map[dtypes.MinerAddress]bool{} - } - if len(deps.Maddrs) == 0 { - for _, s := range deps.Cfg.Addresses { - for _, s := range s.MinerAddresses { - addr, err := address.NewFromString(s) - if err != nil { - return err - } - deps.Maddrs[dtypes.MinerAddress(addr)] = true - } - } - } - - if deps.ProofTypes == nil { - deps.ProofTypes = map[abi.RegisteredSealProof]bool{} - } - if len(deps.ProofTypes) == 0 { - for maddr := range deps.Maddrs { - spt, err := modules.SealProofType(maddr, deps.Full) - if err != nil { - return err - } - deps.ProofTypes[spt] = true - } - } - - return nil -} - -func LoadConfigWithUpgrades(text string, curioConfigWithDefaults *config.CurioConfig) (toml.MetaData, error) { - // allow migration from old config format that was limited to 1 wallet setup. 
- newText := strings.Join(lo.Map(strings.Split(text, "\n"), func(line string, _ int) string { - if strings.EqualFold(line, "[addresses]") { - return "[[addresses]]" - } - return line - }), "\n") - meta, err := toml.Decode(newText, &curioConfigWithDefaults) - for i := range curioConfigWithDefaults.Addresses { - if curioConfigWithDefaults.Addresses[i].PreCommitControl == nil { - curioConfigWithDefaults.Addresses[i].PreCommitControl = []string{} - } - if curioConfigWithDefaults.Addresses[i].CommitControl == nil { - curioConfigWithDefaults.Addresses[i].CommitControl = []string{} - } - if curioConfigWithDefaults.Addresses[i].TerminateControl == nil { - curioConfigWithDefaults.Addresses[i].TerminateControl = []string{} - } - } - return meta, err -} -func GetConfig(cctx *cli.Context, db *harmonydb.DB) (*config.CurioConfig, error) { - curioConfig := config.DefaultCurioConfig() - have := []string{} - layers := append([]string{"base"}, cctx.StringSlice("layers")...) // Always stack on top of "base" layer - for _, layer := range layers { - text := "" - err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) - if err != nil { - if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { - return nil, fmt.Errorf("missing layer '%s' ", layer) - } - if layer == "base" { - return nil, errors.New(`curio defaults to a layer named 'base'. - Either use 'migrate' command or edit a base.toml and upload it with: curio config set base.toml`) - } - return nil, fmt.Errorf("could not read layer '%s': %w", layer, err) - } - - meta, err := LoadConfigWithUpgrades(text, curioConfig) - if err != nil { - return curioConfig, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err) - } - for _, k := range meta.Keys() { - have = append(have, strings.Join(k, " ")) - } - log.Debugw("Using layer", "layer", layer, "config", curioConfig) - } - _ = have // FUTURE: verify that required fields are here. 
- // If config includes 3rd-party config, consider JSONSchema as a way that - // 3rd-parties can dynamically include config requirements and we can - // validate the config. Because of layering, we must validate @ startup. - return curioConfig, nil -} - -func GetDefaultConfig(comment bool) (string, error) { - c := config.DefaultCurioConfig() - cb, err := config.ConfigUpdate(c, nil, config.Commented(comment), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return "", err - } - return string(cb), nil -} - -func GetDepsCLI(ctx context.Context, cctx *cli.Context) (*Deps, error) { - db, err := MakeDB(cctx) - if err != nil { - return nil, err - } - - cfg, err := GetConfig(cctx, db) - if err != nil { - return nil, err - } - - full, fullCloser, err := getFullNodeAPIV1Curio(cctx, cfg.Apis.ChainApiInfo) - if err != nil { - return nil, err - } - go func() { - select { - case <-ctx.Done(): - fullCloser() - } - }() - - return &Deps{ - Cfg: cfg, - DB: db, - Full: full, - }, nil -} - -func CreateMinerConfig(ctx context.Context, full v1api.FullNode, db *harmonydb.DB, miners []string, info string) error { - var titles []string - err := db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - return fmt.Errorf("cannot reach the db. 
Ensure that Yugabyte flags are set correctly to"+ - " reach Yugabyte: %s", err.Error()) - } - - // setup config - curioConfig := config.DefaultCurioConfig() - - for _, addr := range miners { - maddr, err := address.NewFromString(addr) - if err != nil { - return xerrors.Errorf("Invalid address: %s", addr) - } - - _, err = full.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("Failed to get miner info: %w", err) - } - - curioConfig.Addresses = append(curioConfig.Addresses, config.CurioAddresses{ - PreCommitControl: []string{}, - CommitControl: []string{}, - TerminateControl: []string{}, - DisableOwnerFallback: false, - DisableWorkerFallback: false, - MinerAddresses: []string{addr}, - }) - } - - { - sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32)) - if err != nil { - return err - } - - curioConfig.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(sk) - } - - { - curioConfig.Apis.ChainApiInfo = append(curioConfig.Apis.ChainApiInfo, info) - } - - curioConfig.Addresses = lo.Filter(curioConfig.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - // If no base layer is present - if !lo.Contains(titles, "base") { - cb, err := config.ConfigUpdate(curioConfig, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("Failed to generate default config: %w", err) - } - cfg := string(cb) - _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", cfg) - if err != nil { - return xerrors.Errorf("failed to insert the 'base' into the database: %w", err) - } - fmt.Printf("The base layer has been updated with miner[s] %s\n", miners) - return nil - } - - // if base layer is present - baseCfg := config.DefaultCurioConfig() - var baseText string - err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText) - if err != nil { - return 
xerrors.Errorf("Cannot load base config from database: %w", err) - } - _, err = LoadConfigWithUpgrades(baseText, baseCfg) - if err != nil { - return xerrors.Errorf("Cannot parse base config: %w", err) - } - - baseCfg.Addresses = append(baseCfg.Addresses, curioConfig.Addresses...) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("cannot interpret config: %w", err) - } - _, err = db.Exec(ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb)) - if err != nil { - return xerrors.Errorf("cannot update base config: %w", err) - } - fmt.Printf("The base layer has been updated with miner[s] %s\n", miners) - return nil -} diff --git a/cmd/curio/guidedsetup/guidedsetup.go b/cmd/curio/guidedsetup/guidedsetup.go deleted file mode 100644 index 1bdb8e784ca..00000000000 --- a/cmd/curio/guidedsetup/guidedsetup.go +++ /dev/null @@ -1,881 +0,0 @@ -// guidedSetup for migration from lotus-miner to Curio -// -// IF STRINGS CHANGED { -// follow instructions at ../internal/translations/translations.go -// } -package guidedsetup - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "math/bits" - "net/http" - "os" - "os/signal" - "path" - "reflect" - "strconv" - "strings" - "syscall" - "time" - - "github.com/BurntSushi/toml" - "github.com/charmbracelet/lipgloss" - "github.com/docker/go-units" - "github.com/manifoldco/promptui" - "github.com/mitchellh/go-homedir" - "github.com/samber/lo" - "github.com/urfave/cli/v2" - "golang.org/x/text/language" - "golang.org/x/text/message" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/abi" - - 
"github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/spcli" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" - _ "github.com/filecoin-project/lotus/cmd/curio/internal/translations" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/repo" -) - -// URL to upload user-selected fields to help direct developer's focus. -const DeveloperFocusRequestURL = "https://curiostorage.org/cgi-bin/savedata.php" - -var GuidedsetupCmd = &cli.Command{ - Name: "guided-setup", - Usage: "Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner", - Flags: []cli.Flag{ - &cli.StringFlag{ // for cliutil.GetFullNodeAPI - Name: "repo", - EnvVars: []string{"LOTUS_PATH"}, - Hidden: true, - Value: "~/.lotus", - }, - }, - Action: func(cctx *cli.Context) (err error) { - T, say := SetupLanguage() - setupCtrlC(say) - - // Run the migration steps - migrationData := MigrationData{ - T: T, - say: say, - selectTemplates: &promptui.SelectTemplates{ - Help: T("Use the arrow keys to navigate: ↓ ↑ → ← "), - }, - cctx: cctx, - ctx: cctx.Context, - } - - newOrMigrate(&migrationData) - if migrationData.init { - say(header, "This interactive tool creates a new miner actor and creates the basic configuration layer for it.") - say(notice, "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster < miner ID >' to finish the configuration.") - for _, step := range newMinerSteps { - step(&migrationData) - } - } else { - say(header, "This interactive tool migrates lotus-miner to Curio in 5 minutes.") - say(notice, "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.") - - for _, step := range migrationSteps { - step(&migrationData) - } - } - - for _, closer := range migrationData.closers { - closer() - } - return nil - }, -} - -func setupCtrlC(say func(style lipgloss.Style, key message.Reference, a ...interface{})) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - say(notice, "Ctrl+C pressed in Terminal") - os.Exit(2) - }() -} - -var ( - header = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#00FF00")). - Background(lipgloss.Color("#242424")). - BorderStyle(lipgloss.NormalBorder()). - Width(60).Margin(1) - - notice = lipgloss.NewStyle(). - Align(lipgloss.Left). - Bold(true). - Foreground(lipgloss.Color("#CCCCCC")). - Background(lipgloss.Color("#333300")).MarginBottom(1) - - green = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#00FF00")). - Background(lipgloss.Color("#000000")) - - plain = lipgloss.NewStyle().Align(lipgloss.Left) - - section = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#000000")). - Background(lipgloss.Color("#FFFFFF")). - Underline(true) - - code = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#00FF00")). 
- Background(lipgloss.Color("#f8f9fa")) -) - -func SetupLanguage() (func(key message.Reference, a ...interface{}) string, func(style lipgloss.Style, key message.Reference, a ...interface{})) { - langText := "en" - problem := false - if len(os.Getenv("LANG")) > 1 { - langText = os.Getenv("LANG")[:2] - } else { - problem = true - } - - lang, err := language.Parse(langText) - if err != nil { - lang = language.English - problem = true - fmt.Println("Error parsing language") - } - - langs := message.DefaultCatalog.Languages() - have := lo.SliceToMap(langs, func(t language.Tag) (string, bool) { return t.String(), true }) - if _, ok := have[lang.String()]; !ok { - lang = language.English - problem = true - } - if problem { - _ = os.Setenv("LANG", "en-US") // for later users of this function - notice.Copy().AlignHorizontal(lipgloss.Right). - Render("$LANG=" + langText + " unsupported. Available: " + strings.Join(lo.Keys(have), ", ")) - fmt.Println("Defaulting to English. Please reach out to the Curio team if you would like to have additional language support.") - } - return func(key message.Reference, a ...interface{}) string { - return message.NewPrinter(lang).Sprintf(key, a...) - }, func(sty lipgloss.Style, key message.Reference, a ...interface{}) { - msg := message.NewPrinter(lang).Sprintf(key, a...) - fmt.Println(sty.Render(msg)) - } -} - -func newOrMigrate(d *MigrationData) { - i, _, err := (&promptui.Select{ - Label: d.T("I want to:"), - Items: []string{ - d.T("Migrate from existing Lotus-Miner"), - d.T("Create a new miner")}, - Templates: d.selectTemplates, - }).Run() - if err != nil { - d.say(notice, "Aborting remaining steps.", err.Error()) - os.Exit(1) - } - if i == 1 { - d.init = true - } -} - -type migrationStep func(*MigrationData) - -var migrationSteps = []migrationStep{ - readMinerConfig, // Tells them to be on the miner machine - yugabyteConnect, // Miner is updated - configToDB, // work on base configuration migration. 
- verifySectors, // Verify the sectors are in the database - doc, - oneLastThing, - complete, -} - -type newMinerStep func(data *MigrationData) - -var newMinerSteps = []newMinerStep{ - stepPresteps, - stepCreateActor, - stepNewMinerConfig, - doc, - oneLastThing, - completeInit, -} - -type MigrationData struct { - T func(key message.Reference, a ...interface{}) string - say func(style lipgloss.Style, key message.Reference, a ...interface{}) - selectTemplates *promptui.SelectTemplates - MinerConfigPath string - MinerConfig *config.StorageMiner - DB *harmonydb.DB - MinerID address.Address - full v1api.FullNode - cctx *cli.Context - closers []jsonrpc.ClientCloser - ctx context.Context - owner address.Address - worker address.Address - sender address.Address - ssize abi.SectorSize - confidence uint64 - init bool -} - -func complete(d *MigrationData) { - stepCompleted(d, d.T("Lotus-Miner to Curio Migration.")) - d.say(plain, "Try the web interface with %s for further guided improvements.", code.Render("curio run --layers=gui")) - d.say(plain, "You can now migrate your market node (%s), if applicable.", "Boost") -} - -func completeInit(d *MigrationData) { - stepCompleted(d, d.T("New Miner initialization complete.")) - d.say(plain, "Try the web interface with %s for further guided improvements.", "--layers=gui") -} - -func configToDB(d *MigrationData) { - d.say(section, "Migrating lotus-miner config.toml to Curio in-database configuration.") - - { - var closer jsonrpc.ClientCloser - var err error - d.full, closer, err = cliutil.GetFullNodeAPIV1(d.cctx) - d.closers = append(d.closers, closer) - if err != nil { - d.say(notice, "Error getting API: %s", err.Error()) - os.Exit(1) - } - } - ainfo, err := cliutil.GetAPIInfo(d.cctx, repo.FullNode) - if err != nil { - d.say(notice, "could not get API info for FullNode: %w", err) - os.Exit(1) - } - token, err := d.full.AuthNew(context.Background(), api.AllPermissions) - if err != nil { - d.say(notice, "Error getting token: %s", 
err.Error()) - os.Exit(1) - } - - chainApiInfo := fmt.Sprintf("%s:%s", string(token), ainfo.Addr) - - d.MinerID, err = SaveConfigToLayer(d.MinerConfigPath, chainApiInfo) - if err != nil { - d.say(notice, "Error saving config to layer: %s. Aborting Migration", err.Error()) - os.Exit(1) - } -} - -// bucket returns the power's 4 highest bits (rounded down). -func bucket(power *api.MinerPower) uint64 { - rawQAP := power.TotalPower.QualityAdjPower.Uint64() - magnitude := lo.Max([]int{bits.Len64(rawQAP), 5}) - - // shifting erases resolution so we cannot distinguish SPs of similar scales. - return rawQAP >> (uint64(magnitude) - 4) << (uint64(magnitude - 4)) -} - -type uploadType int - -const uploadTypeIndividual uploadType = 0 -const uploadTypeAggregate uploadType = 1 - -// const uploadTypeHint uploadType = 2 -const uploadTypeNothing uploadType = 3 - -func oneLastThing(d *MigrationData) { - d.say(section, "The Curio team wants to improve the software you use. Tell the team you're using `%s`.", "curio") - i, _, err := (&promptui.Select{ - Label: d.T("Select what you want to share with the Curio team."), - Items: []string{ - d.T("Individual Data: Miner ID, Curio version, chain (%s or %s). 
Signed.", "mainnet", "calibration"), - d.T("Aggregate-Anonymous: version, chain, and Miner power (bucketed)."), - d.T("Hint: I am someone running Curio on whichever chain."), - d.T("Nothing.")}, - Templates: d.selectTemplates, - }).Run() - preference := uploadType(i) - if err != nil { - d.say(notice, "Aborting remaining steps.", err.Error()) - os.Exit(1) - } - if preference != uploadTypeNothing { - msgMap := map[string]any{ - "domain": "curio-newuser", - "net": build.BuildTypeString(), - } - if preference == uploadTypeIndividual || preference == uploadTypeAggregate { - // articles of incorporation - power, err := d.full.StateMinerPower(context.Background(), d.MinerID, types.EmptyTSK) - if err != nil { - d.say(notice, "Error getting miner power: %s", err.Error()) - os.Exit(1) - } - msgMap["version"] = build.BuildVersion - msgMap["net"] = build.BuildType - msgMap["power"] = map[uploadType]uint64{ - uploadTypeIndividual: power.MinerPower.QualityAdjPower.Uint64(), - uploadTypeAggregate: bucket(power)}[preference] - - if preference == uploadTypeIndividual { // Sign it - msgMap["miner_id"] = d.MinerID - msg, err := json.Marshal(msgMap) - if err != nil { - d.say(notice, "Error marshalling message: %s", err.Error()) - os.Exit(1) - } - mi, err := d.full.StateMinerInfo(context.Background(), d.MinerID, types.EmptyTSK) - if err != nil { - d.say(notice, "Error getting miner info: %s", err.Error()) - os.Exit(1) - } - sig, err := d.full.WalletSign(context.Background(), mi.Worker, msg) - if err != nil { - d.say(notice, "Error signing message: %s", err.Error()) - os.Exit(1) - } - msgMap["signature"] = base64.StdEncoding.EncodeToString(sig.Data) - } - } - msg, err := json.Marshal(msgMap) - if err != nil { - d.say(notice, "Error marshalling message: %s", err.Error()) - os.Exit(1) - } - - resp, err := http.DefaultClient.Post(DeveloperFocusRequestURL, "application/json", bytes.NewReader(msg)) - if err != nil { - d.say(notice, "Error sending message: %s", err.Error()) - } - if resp != 
nil { - defer func() { _ = resp.Body.Close() }() - if resp.StatusCode != 200 { - b, err := io.ReadAll(resp.Body) - if err == nil { - d.say(notice, "Error sending message: Status %s, Message: ", resp.Status, string(b)) - } - } else { - stepCompleted(d, d.T("Message sent.")) - } - } - } -} - -func doc(d *MigrationData) { - d.say(plain, "Documentation: ") - d.say(plain, "The '%s' layer stores common configuration. All curio instances can include it in their %s argument.", "base", "--layers") - d.say(plain, "You can add other layers for per-machine configuration changes.") - - d.say(plain, "Filecoin %s channels: %s and %s", "Slack", "#fil-curio-help", "#fil-curio-dev") - - d.say(plain, "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'") - //d.say(plain, "Point your browser to your web GUI to complete setup with %s and advanced featues.", "Boost") - d.say(plain, "One database can serve multiple miner IDs: Run a migration for each lotus-miner.") -} - -func verifySectors(d *MigrationData) { - var i []int - var lastError string - fmt.Println() - d.say(section, "Please start (or restart) %s now that database credentials are in %s.", "lotus-miner", "config.toml") - d.say(notice, "Waiting for %s to write sectors into Yugabyte.", "lotus-miner") - - mid, err := address.IDFromAddress(d.MinerID) - if err != nil { - d.say(notice, "Error interpreting miner ID: %s: ID: %s", err.Error(), d.MinerID.String()) - os.Exit(1) - } - - for { - err := d.DB.Select(context.Background(), &i, ` - SELECT count(*) FROM sector_location WHERE miner_id=$1`, mid) - if err != nil { - if err.Error() != lastError { - d.say(notice, "Error verifying sectors: %s", err.Error()) - lastError = err.Error() - } - continue - } - if i[0] > 0 { - break - } - fmt.Print(".") - time.Sleep(5 * time.Second) - } - d.say(plain, "The sectors are in the database. 
The database is ready for %s.", "Curio") - d.say(notice, "Now shut down lotus-miner and lotus-worker and use run %s instead.", code.Render("curio run")) - - _, err = (&promptui.Prompt{Label: d.T("Press return to continue")}).Run() - if err != nil { - d.say(notice, "Aborting migration.") - os.Exit(1) - } - stepCompleted(d, d.T("Sectors verified. %d sector locations found.", i)) -} - -func yugabyteConnect(d *MigrationData) { - harmonyCfg := config.DefaultStorageMiner().HarmonyDB //copy the config to a local variable - if d.MinerConfig != nil { - harmonyCfg = d.MinerConfig.HarmonyDB //copy the config to a local variable - } - var err error - d.DB, err = harmonydb.NewFromConfig(harmonyCfg) - if err != nil { - hcfg := getDBDetails(d) - harmonyCfg = *hcfg - } - - d.say(plain, "Connected to Yugabyte. Schema is current.") - if !reflect.DeepEqual(harmonyCfg, d.MinerConfig.HarmonyDB) || !d.MinerConfig.Subsystems.EnableSectorIndexDB { - d.MinerConfig.HarmonyDB = harmonyCfg - d.MinerConfig.Subsystems.EnableSectorIndexDB = true - - d.say(plain, "Enabling Sector Indexing in the database.") - buf, err := config.ConfigUpdate(d.MinerConfig, config.DefaultStorageMiner()) - if err != nil { - d.say(notice, "Error encoding config.toml: %s", err.Error()) - os.Exit(1) - } - _, err = (&promptui.Prompt{ - Label: d.T("Press return to update %s with Yugabyte info. 
A Backup file will be written to that folder before changes are made.", "config.toml")}).Run() - if err != nil { - os.Exit(1) - } - p, err := homedir.Expand(d.MinerConfigPath) - if err != nil { - d.say(notice, "Error expanding path: %s", err.Error()) - os.Exit(1) - } - tomlPath := path.Join(p, "config.toml") - stat, err := os.Stat(tomlPath) - if err != nil { - d.say(notice, "Error reading filemode of config.toml: %s", err.Error()) - os.Exit(1) - } - fBackup, err := os.CreateTemp(p, "config-backup-*.toml") - if err != nil { - d.say(notice, "Error creating backup file: %s", err.Error()) - os.Exit(1) - } - fBackupContents, err := os.ReadFile(tomlPath) - if err != nil { - d.say(notice, "Error reading config.toml: %s", err.Error()) - os.Exit(1) - } - _, err = fBackup.Write(fBackupContents) - if err != nil { - d.say(notice, "Error writing backup file: %s", err.Error()) - os.Exit(1) - } - err = fBackup.Close() - if err != nil { - d.say(notice, "Error closing backup file: %s", err.Error()) - os.Exit(1) - } - - filemode := stat.Mode() - err = os.WriteFile(path.Join(p, "config.toml"), buf, filemode) - if err != nil { - d.say(notice, "Error writing config.toml: %s", err.Error()) - os.Exit(1) - } - d.say(section, "Restart Lotus Miner. 
") - } - stepCompleted(d, d.T("Connected to Yugabyte")) -} - -func readMinerConfig(d *MigrationData) { - d.say(plain, "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.") - - verifyPath := func(dir string) (*config.StorageMiner, error) { - cfg := config.DefaultStorageMiner() - dir, err := homedir.Expand(dir) - if err != nil { - return nil, err - } - _, err = toml.DecodeFile(path.Join(dir, "config.toml"), &cfg) - return cfg, err - } - - dirs := map[string]*config.StorageMiner{"~/.lotusminer": nil, "~/.lotus-miner-local-net": nil} - if v := os.Getenv("LOTUS_MINER_PATH"); v != "" { - dirs[v] = nil - } - for dir := range dirs { - cfg, err := verifyPath(dir) - if err != nil { - delete(dirs, dir) - } - dirs[dir] = cfg - } - - var otherPath bool - if len(dirs) > 0 { - _, str, err := (&promptui.Select{ - Label: d.T("Select the location of your lotus-miner config directory?"), - Items: append(lo.Keys(dirs), d.T("Other")), - Templates: d.selectTemplates, - }).Run() - if err != nil { - if err.Error() == "^C" { - os.Exit(1) - } - otherPath = true - } else { - if str == d.T("Other") { - otherPath = true - } else { - d.MinerConfigPath = str - d.MinerConfig = dirs[str] - } - } - } - if otherPath { - minerPathEntry: - str, err := (&promptui.Prompt{ - Label: d.T("Enter the path to the configuration directory used by %s", "lotus-miner"), - }).Run() - if err != nil { - d.say(notice, "No path provided, abandoning migration ") - os.Exit(1) - } - cfg, err := verifyPath(str) - if err != nil { - d.say(notice, "Cannot read the config.toml file in the provided directory, Error: %s", err.Error()) - goto minerPathEntry - } - d.MinerConfigPath = str - d.MinerConfig = cfg - } - - // Try to lock Miner repo to verify that lotus-miner is not running - { - r, err := repo.NewFS(d.MinerConfigPath) - if err != nil { - d.say(plain, "Could not create repo from directory: %s. 
Aborting migration", err.Error()) - os.Exit(1) - } - lr, err := r.Lock(repo.StorageMiner) - if err != nil { - d.say(plain, "Could not lock miner repo. Your miner must be stopped: %s\n Aborting migration", err.Error()) - os.Exit(1) - } - _ = lr.Close() - } - - stepCompleted(d, d.T("Read Miner Config")) -} -func stepCompleted(d *MigrationData, step string) { - fmt.Print(green.Render("✔ ")) - d.say(plain, "Step Complete: %s\n", step) -} - -func stepCreateActor(d *MigrationData) { - d.say(plain, "Initializing a new miner actor.") - - for { - i, _, err := (&promptui.Select{ - Label: d.T("Enter the info to create a new miner"), - Items: []string{ - d.T("Owner Address: %s", d.owner.String()), - d.T("Worker Address: %s", d.worker.String()), - d.T("Sender Address: %s", d.sender.String()), - d.T("Sector Size: %d", d.ssize), - d.T("Confidence epochs: %d", d.confidence), - d.T("Continue to verify the addresses and create a new miner actor.")}, - Size: 6, - Templates: d.selectTemplates, - }).Run() - if err != nil { - d.say(notice, "Miner creation error occurred: %s ", err.Error()) - os.Exit(1) - } - switch i { - case 0: - owner, err := (&promptui.Prompt{ - Label: d.T("Enter the owner address"), - }).Run() - if err != nil { - d.say(notice, "No address provided") - continue - } - ownerAddr, err := address.NewFromString(owner) - if err != nil { - d.say(notice, "Failed to parse the address: %s", err.Error()) - } - d.owner = ownerAddr - case 1, 2: - val, err := (&promptui.Prompt{ - Label: d.T("Enter %s address", []string{"worker", "sender"}[i-1]), - Default: d.owner.String(), - }).Run() - if err != nil { - d.say(notice, err.Error()) - continue - } - addr, err := address.NewFromString(val) - if err != nil { - d.say(notice, "Failed to parse the address: %s", err.Error()) - } - switch i { - case 1: - d.worker = addr - case 2: - d.sender = addr - } - continue - case 3: - val, err := (&promptui.Prompt{ - Label: d.T("Enter the sector size"), - }).Run() - if err != nil { - d.say(notice, 
"No value provided") - continue - } - sectorSize, err := units.RAMInBytes(val) - if err != nil { - d.say(notice, "Failed to parse sector size: %s", err.Error()) - continue - } - d.ssize = abi.SectorSize(sectorSize) - continue - case 4: - confidenceStr, err := (&promptui.Prompt{ - Label: d.T("Confidence epochs"), - Default: strconv.Itoa(5), - }).Run() - if err != nil { - d.say(notice, err.Error()) - continue - } - confidence, err := strconv.ParseUint(confidenceStr, 10, 64) - if err != nil { - d.say(notice, "Failed to parse confidence: %s", err.Error()) - continue - } - d.confidence = confidence - goto minerInit // break out of the for loop once we have all the values - } - } - -minerInit: - miner, err := spcli.CreateStorageMiner(d.ctx, d.full, d.owner, d.worker, d.sender, d.ssize, d.confidence) - if err != nil { - d.say(notice, "Failed to create the miner actor: %s", err.Error()) - os.Exit(1) - } - - d.MinerID = miner - stepCompleted(d, d.T("Miner %s created successfully", miner.String())) -} - -func stepPresteps(d *MigrationData) { - - // Setup and connect to YugabyteDB - _ = getDBDetails(d) - - // Verify HarmonyDB connection - var titles []string - err := d.DB.Select(d.ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - d.say(notice, "Cannot reach the DB: %s", err.Error()) - os.Exit(1) - } - - // Get full node API - full, closer, err := cliutil.GetFullNodeAPIV1(d.cctx) - if err != nil { - d.say(notice, "Error connecting to full node API: %s", err.Error()) - os.Exit(1) - } - d.full = full - d.closers = append(d.closers, closer) - stepCompleted(d, d.T("Pre-initialization steps complete")) -} - -func stepNewMinerConfig(d *MigrationData) { - curioCfg := config.DefaultCurioConfig() - curioCfg.Addresses = append(curioCfg.Addresses, config.CurioAddresses{ - PreCommitControl: []string{}, - CommitControl: []string{}, - TerminateControl: []string{}, - DisableOwnerFallback: false, - DisableWorkerFallback: false, - MinerAddresses: 
[]string{d.MinerID.String()}, - }) - - sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32)) - if err != nil { - d.say(notice, "Failed to generate random bytes for secret: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - curioCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(sk) - - ainfo, err := cliutil.GetAPIInfo(d.cctx, repo.FullNode) - if err != nil { - d.say(notice, "Failed to get API info for FullNode: %w", err) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - token, err := d.full.AuthNew(d.ctx, api.AllPermissions) - if err != nil { - d.say(notice, "Failed to verify the auth token from daemon node: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, fmt.Sprintf("%s:%s", string(token), ainfo.Addr)) - - // write config - var titles []string - err = d.DB.Select(d.ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - d.say(notice, "Cannot reach the DB: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - // If 'base' layer is not present - if !lo.Contains(titles, "base") { - curioCfg.Addresses = lo.Filter(curioCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - d.say(notice, "Failed to generate default config: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - _, err = d.DB.Exec(d.ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", string(cb)) - if err != nil { - d.say(notice, "Failed to insert 'base' config layer in database: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - stepCompleted(d, d.T("Configuration 'base' was updated to include this miner's address")) - return - } - - // If base layer is already present - baseCfg := config.DefaultCurioConfig() - var baseText string - - err = d.DB.QueryRow(d.ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText) - if err != nil { - d.say(notice, "Failed to load base config from database: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - _, err = deps.LoadConfigWithUpgrades(baseText, baseCfg) - if err != nil { - d.say(notice, "Failed to parse base config: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses...) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - d.say(notice, "Failed to regenerate base config: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - _, err = d.DB.Exec(d.ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb)) - if err != nil { - d.say(notice, "Failed to insert 'base' config layer in database: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - stepCompleted(d, d.T("Configuration 'base' was updated to include this miner's address")) -} - -func getDBDetails(d *MigrationData) *config.HarmonyDB { - harmonyCfg := config.DefaultStorageMiner().HarmonyDB - for { - i, _, err := (&promptui.Select{ - Label: d.T("Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)"), - Items: []string{ - d.T("Host: %s", strings.Join(harmonyCfg.Hosts, ",")), - d.T("Port: %s", harmonyCfg.Port), - d.T("Username: %s", harmonyCfg.Username), - d.T("Password: %s", harmonyCfg.Password), - d.T("Database: %s", harmonyCfg.Database), - d.T("Continue to connect and update schema.")}, - Size: 6, - Templates: d.selectTemplates, - }).Run() - if err != nil { - d.say(notice, "Database config error occurred, abandoning migration: %s ", err.Error()) - os.Exit(1) - } - switch i { - case 0: - host, err := (&promptui.Prompt{ - Label: d.T("Enter the Yugabyte database host(s)"), - }).Run() - if err != nil { - d.say(notice, "No host provided") - continue - } - harmonyCfg.Hosts = strings.Split(host, ",") - case 1, 2, 3, 4: - val, err := (&promptui.Prompt{ - Label: d.T("Enter the Yugabyte database %s", []string{"port", "username", "password", "database"}[i-1]), - }).Run() - if err != nil { - d.say(notice, "No value provided") - continue - } - switch i { - case 1: - harmonyCfg.Port = val - case 2: - harmonyCfg.Username = val - case 3: - harmonyCfg.Password = val - case 4: - harmonyCfg.Database = val - } - continue - case 5: - db, err := harmonydb.NewFromConfig(harmonyCfg) - if err != nil { - if err.Error() == "^C" { - os.Exit(1) - } - d.say(notice, "Error connecting to Yugabyte database: %s", err.Error()) - continue - } - d.DB = db - return &harmonyCfg - } - } -} diff --git a/cmd/curio/guidedsetup/shared.go b/cmd/curio/guidedsetup/shared.go deleted file mode 100644 index 6e7d81c0382..00000000000 --- 
a/cmd/curio/guidedsetup/shared.go +++ /dev/null @@ -1,258 +0,0 @@ -package guidedsetup - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "os" - "path" - "strings" - - "github.com/BurntSushi/toml" - "github.com/ipfs/go-datastore" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/repo" -) - -const ( - FlagMinerRepo = "miner-repo" -) - -const FlagMinerRepoDeprecation = "storagerepo" - -func SaveConfigToLayer(minerRepoPath, chainApiInfo string) (minerAddress address.Address, err error) { - _, say := SetupLanguage() - ctx := context.Background() - - r, err := repo.NewFS(minerRepoPath) - if err != nil { - return minerAddress, err - } - - ok, err := r.Exists() - if err != nil { - return minerAddress, err - } - - if !ok { - return minerAddress, fmt.Errorf("repo not initialized at: %s", minerRepoPath) - } - - lr, err := r.LockRO(repo.StorageMiner) - if err != nil { - return minerAddress, fmt.Errorf("locking repo: %w", err) - } - defer func() { - err = lr.Close() - if err != nil { - fmt.Println("error closing repo: ", err) - } - }() - - cfgNode, err := lr.Config() - if err != nil { - return minerAddress, fmt.Errorf("getting node config: %w", err) - } - smCfg := cfgNode.(*config.StorageMiner) - - db, err := harmonydb.NewFromConfig(smCfg.HarmonyDB) - if err != nil { - return minerAddress, fmt.Errorf("could not reach the database. Ensure the Miner config toml's HarmonyDB entry"+ - " is setup to reach Yugabyte correctly: %w", err) - } - - var titles []string - err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - return minerAddress, fmt.Errorf("miner cannot reach the db. 
Ensure the config toml's HarmonyDB entry"+ - " is setup to reach Yugabyte correctly: %s", err.Error()) - } - - // Copy over identical settings: - - buf, err := os.ReadFile(path.Join(lr.Path(), "config.toml")) - if err != nil { - return minerAddress, fmt.Errorf("could not read config.toml: %w", err) - } - curioCfg := config.DefaultCurioConfig() - - ensureEmptyArrays(curioCfg) - _, err = deps.LoadConfigWithUpgrades(string(buf), curioCfg) - - if err != nil { - return minerAddress, fmt.Errorf("could not decode toml: %w", err) - } - - // Populate Miner Address - mmeta, err := lr.Datastore(ctx, "/metadata") - if err != nil { - return minerAddress, xerrors.Errorf("opening miner metadata datastore: %w", err) - } - defer func() { - // _ = mmeta.Close() - }() - - maddrBytes, err := mmeta.Get(ctx, datastore.NewKey("miner-address")) - if err != nil { - return minerAddress, xerrors.Errorf("getting miner address datastore entry: %w", err) - } - - addr, err := address.NewFromBytes(maddrBytes) - if err != nil { - return minerAddress, xerrors.Errorf("parsing miner actor address: %w", err) - } - - minerAddress = addr - - curioCfg.Addresses = []config.CurioAddresses{{ - MinerAddresses: []string{addr.String()}, - PreCommitControl: smCfg.Addresses.PreCommitControl, - CommitControl: smCfg.Addresses.CommitControl, - TerminateControl: smCfg.Addresses.TerminateControl, - DisableOwnerFallback: smCfg.Addresses.DisableOwnerFallback, - DisableWorkerFallback: smCfg.Addresses.DisableWorkerFallback, - }} - - ks, err := lr.KeyStore() - if err != nil { - return minerAddress, xerrors.Errorf("keystore err: %w", err) - } - js, err := ks.Get(modules.JWTSecretName) - if err != nil { - return minerAddress, xerrors.Errorf("error getting JWTSecretName: %w", err) - } - - curioCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(js.PrivateKey) - - curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, chainApiInfo) - // Express as configTOML - configTOML := &bytes.Buffer{} - if err = 
toml.NewEncoder(configTOML).Encode(curioCfg); err != nil { - return minerAddress, err - } - - if lo.Contains(titles, "base") { - // append addresses - var baseCfg = config.DefaultCurioConfig() - var baseText string - err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot load base config: %w", err) - } - ensureEmptyArrays(baseCfg) - _, err := deps.LoadConfigWithUpgrades(baseText, baseCfg) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot load base config: %w", err) - } - for _, addr := range baseCfg.Addresses { - if lo.Contains(addr.MinerAddresses, curioCfg.Addresses[0].MinerAddresses[0]) { - goto skipWritingToBase - } - } - // write to base - { - baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses[0]) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - if baseCfg.Apis.ChainApiInfo == nil { - baseCfg.Apis.ChainApiInfo = append(baseCfg.Apis.ChainApiInfo, chainApiInfo) - } - if baseCfg.Apis.StorageRPCSecret == "" { - baseCfg.Apis.StorageRPCSecret = curioCfg.Apis.StorageRPCSecret - } - - cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return minerAddress, xerrors.Errorf("cannot interpret config: %w", err) - } - _, err = db.Exec(ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb)) - if err != nil { - return minerAddress, xerrors.Errorf("cannot update base config: %w", err) - } - say(plain, "Configuration 'base' was updated to include this miner's address (%s) and its wallet setup.", minerAddress) - } - say(plain, "Compare the configurations %s to %s. 
Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", "base", "mig-"+curioCfg.Addresses[0].MinerAddresses[0]) - skipWritingToBase: - } else { - _, err = db.Exec(ctx, `INSERT INTO harmony_config (title, config) VALUES ('base', $1) - ON CONFLICT(title) DO UPDATE SET config=EXCLUDED.config`, configTOML) - - if err != nil { - return minerAddress, xerrors.Errorf("Cannot insert base config: %w", err) - } - say(notice, "Configuration 'base' was created to resemble this lotus-miner's config.toml .") - } - - { // make a layer representing the migration - layerName := fmt.Sprintf("mig-%s", curioCfg.Addresses[0].MinerAddresses[0]) - _, err = db.Exec(ctx, "DELETE FROM harmony_config WHERE title=$1", layerName) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot delete existing layer: %w", err) - } - - _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", layerName, configTOML.String()) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot insert layer after layer created message: %w", err) - } - say(plain, "Layer %s created. 
", layerName) - } - - dbSettings := getDBSettings(*smCfg) - say(plain, "To work with the config: ") - fmt.Println(code.Render(`curio ` + dbSettings + ` config edit base`)) - say(plain, `To run Curio: With machine or cgroup isolation, use the command (with example layer selection):`) - fmt.Println(code.Render(`curio ` + dbSettings + ` run --layer=post`)) - return minerAddress, nil -} - -func getDBSettings(smCfg config.StorageMiner) string { - dbSettings := "" - def := config.DefaultStorageMiner().HarmonyDB - if def.Hosts[0] != smCfg.HarmonyDB.Hosts[0] { - dbSettings += ` --db-host="` + strings.Join(smCfg.HarmonyDB.Hosts, ",") + `"` - } - if def.Port != smCfg.HarmonyDB.Port { - dbSettings += " --db-port=" + smCfg.HarmonyDB.Port - } - if def.Username != smCfg.HarmonyDB.Username { - dbSettings += ` --db-user="` + smCfg.HarmonyDB.Username + `"` - } - if def.Password != smCfg.HarmonyDB.Password { - dbSettings += ` --db-password="` + smCfg.HarmonyDB.Password + `"` - } - if def.Database != smCfg.HarmonyDB.Database { - dbSettings += ` --db-name="` + smCfg.HarmonyDB.Database + `"` - } - return dbSettings -} - -func ensureEmptyArrays(cfg *config.CurioConfig) { - if cfg.Addresses == nil { - cfg.Addresses = []config.CurioAddresses{} - } else { - for i := range cfg.Addresses { - if cfg.Addresses[i].PreCommitControl == nil { - cfg.Addresses[i].PreCommitControl = []string{} - } - if cfg.Addresses[i].CommitControl == nil { - cfg.Addresses[i].CommitControl = []string{} - } - if cfg.Addresses[i].TerminateControl == nil { - cfg.Addresses[i].TerminateControl = []string{} - } - } - } - if cfg.Apis.ChainApiInfo == nil { - cfg.Apis.ChainApiInfo = []string{} - } -} diff --git a/cmd/curio/internal/translations/catalog.go b/cmd/curio/internal/translations/catalog.go deleted file mode 100644 index 6e0ad30870d..00000000000 --- a/cmd/curio/internal/translations/catalog.go +++ /dev/null @@ -1,476 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
- -package translations - -import ( - "golang.org/x/text/language" - "golang.org/x/text/message" - "golang.org/x/text/message/catalog" -) - -type dictionary struct { - index []uint32 - data string -} - -func (d *dictionary) Lookup(key string) (data string, ok bool) { - p, ok := messageKeyToIndex[key] - if !ok { - return "", false - } - start, end := d.index[p], d.index[p+1] - if start == end { - return "", false - } - return d.data[start:end], true -} - -func init() { - dict := map[string]catalog.Dictionary{ - "en": &dictionary{index: enIndex, data: enData}, - "ko": &dictionary{index: koIndex, data: koData}, - "zh": &dictionary{index: zhIndex, data: zhData}, - } - fallback := language.MustParse("en") - cat, err := catalog.NewFromMap(dict, catalog.Fallback(fallback)) - if err != nil { - panic(err) - } - message.DefaultCatalog = cat -} - -var messageKeyToIndex = map[string]int{ - "Aborting migration.": 45, - "Aborting remaining steps.": 9, - "Aggregate-Anonymous: version, chain, and Miner power (bucketed).": 22, - "Cannot reach the DB: %s": 90, - "Cannot read the config.toml file in the provided directory, Error: %s": 65, - "Compare the configurations %s to %s. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.": 116, - "Confidence epochs": 86, - "Confidence epochs: %d": 76, - "Configuration 'base' was created to resemble this lotus-miner's config.toml .": 117, - "Configuration 'base' was updated to include this miner's address": 99, - "Configuration 'base' was updated to include this miner's address (%s) and its wallet setup.": 115, - "Connected to Yugabyte": 59, - "Connected to Yugabyte. Schema is current.": 47, - "Continue to connect and update schema.": 109, - "Continue to verify the addresses and create a new miner actor.": 77, - "Could not create repo from directory: %s. Aborting migration": 66, - "Could not lock miner repo. 
Your miner must be stopped: %s\n Aborting migration": 67, - "Create a new miner": 8, - "Ctrl+C pressed in Terminal": 5, - "Database config error occurred, abandoning migration: %s ": 110, - "Database: %s": 108, - "Documentation: ": 32, - "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.": 4, - "Enabling Sector Indexing in the database.": 48, - "Enter %s address": 82, - "Enter the Yugabyte database %s": 113, - "Enter the Yugabyte database host(s)": 111, - "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)": 103, - "Enter the info to create a new miner": 71, - "Enter the owner address": 79, - "Enter the path to the configuration directory used by %s": 63, - "Enter the sector size": 83, - "Error closing backup file: %s": 56, - "Error connecting to Yugabyte database: %s": 114, - "Error connecting to full node API: %s": 91, - "Error creating backup file: %s": 53, - "Error encoding config.toml: %s": 49, - "Error expanding path: %s": 51, - "Error getting API: %s": 15, - "Error getting miner info: %s": 27, - "Error getting miner power: %s": 25, - "Error getting token: %s": 17, - "Error interpreting miner ID: %s: ID: %s": 40, - "Error marshalling message: %s": 26, - "Error reading config.toml: %s": 54, - "Error reading filemode of config.toml: %s": 52, - "Error saving config to layer: %s. 
Aborting Migration": 18, - "Error sending message: %s": 29, - "Error sending message: Status %s, Message: ": 30, - "Error signing message: %s": 28, - "Error verifying sectors: %s": 41, - "Error writing backup file: %s": 55, - "Error writing config.toml: %s": 57, - "Failed to create the miner actor: %s": 88, - "Failed to generate default config: %s": 97, - "Failed to generate random bytes for secret: %s": 93, - "Failed to get API info for FullNode: %w": 95, - "Failed to insert 'base' config layer in database: %s": 98, - "Failed to load base config from database: %s": 100, - "Failed to parse base config: %s": 101, - "Failed to parse confidence: %s": 87, - "Failed to parse sector size: %s": 85, - "Failed to parse the address: %s": 81, - "Failed to regenerate base config: %s": 102, - "Failed to verify the auth token from daemon node: %s": 96, - "Filecoin %s channels: %s and %s": 35, - "Hint: I am someone running Curio on whichever chain.": 23, - "Host: %s": 104, - "I want to:": 6, - "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'": 36, - "Individual Data: Miner ID, Curio version, chain (%s or %s). Signed.": 21, - "Initializing a new miner actor.": 70, - "Layer %s created. 
": 118, - "Lotus-Miner to Curio Migration.": 10, - "Message sent.": 31, - "Migrate from existing Lotus-Miner": 7, - "Migrating lotus-miner config.toml to Curio in-database configuration.": 14, - "Miner %s created successfully": 89, - "Miner creation error occurred: %s ": 78, - "New Miner initialization complete.": 13, - "No address provided": 80, - "No host provided": 112, - "No path provided, abandoning migration ": 64, - "No value provided": 84, - "Nothing.": 24, - "Now shut down lotus-miner and lotus-worker and use run %s instead.": 43, - "One database can serve multiple miner IDs: Run a migration for each lotus-miner.": 37, - "Other": 62, - "Owner Address: %s": 72, - "Password: %s": 107, - "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration": 94, - "Please start (or restart) %s now that database credentials are in %s.": 38, - "Port: %s": 105, - "Pre-initialization steps complete": 92, - "Press return to continue": 44, - "Press return to update %s with Yugabyte info. A Backup file will be written to that folder before changes are made.": 50, - "Read Miner Config": 68, - "Restart Lotus Miner. ": 58, - "Sector Size: %d": 75, - "Sectors verified. %d sector locations found.": 46, - "Select the location of your lotus-miner config directory?": 61, - "Select what you want to share with the Curio team.": 20, - "Sender Address: %s": 74, - "Step Complete: %s\n": 69, - "The '%s' layer stores common configuration. All curio instances can include it in their %s argument.": 33, - "The Curio team wants to improve the software you use. Tell the team you're using `%s`.": 19, - "The sectors are in the database. The database is ready for %s.": 42, - "This interactive tool creates a new miner actor and creates the basic configuration layer for it.": 1, - "This interactive tool migrates lotus-miner to Curio in 5 minutes.": 3, - "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster < miner ID >' to finish the configuration.": 2, - "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):": 120, - "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.": 60, - "To work with the config: ": 119, - "Try the web interface with %s for further guided improvements.": 11, - "Use the arrow keys to navigate: ↓ ↑ → ← ": 0, - "Username: %s": 106, - "Waiting for %s to write sectors into Yugabyte.": 39, - "Worker Address: %s": 73, - "You can add other layers for per-machine configuration changes.": 34, - "You can now migrate your market node (%s), if applicable.": 12, - "could not get API info for FullNode: %w": 16, -} - -var enIndex = []uint32{ // 122 elements - // Entry 0 - 1F - 0x00000000, 0x00000035, 0x00000097, 0x0000015a, - 0x0000019c, 0x000001f5, 0x00000210, 0x0000021b, - 0x0000023d, 0x00000250, 0x0000026a, 0x0000028a, - 0x000002cc, 0x00000309, 0x0000032c, 0x00000372, - 0x0000038b, 0x000003b6, 0x000003d1, 0x00000409, - 0x00000463, 0x00000496, 0x000004e0, 0x00000521, - 0x00000556, 0x0000055f, 0x00000580, 0x000005a1, - 0x000005c1, 0x000005de, 0x000005fb, 0x0000062e, - // Entry 20 - 3F - 0x0000063c, 0x00000650, 0x000006bb, 0x000006fb, - 0x00000724, 0x0000079b, 0x000007ec, 0x00000838, - 0x0000086a, 0x00000898, 0x000008b7, 0x000008f9, - 0x0000093f, 0x00000958, 0x0000096c, 0x0000099c, - 0x000009c6, 0x000009f0, 0x00000a12, 0x00000a89, - 0x00000aa5, 0x00000ad2, 0x00000af4, 0x00000b15, - 0x00000b36, 0x00000b57, 0x00000b78, 0x00000b92, - 0x00000ba8, 0x00000bf5, 0x00000c2f, 0x00000c35, - // Entry 40 - 5F - 0x00000c71, 0x00000c9d, 0x00000ce6, 0x00000d26, - 0x00000d77, 0x00000d89, 0x00000da3, 0x00000dc3, - 0x00000de8, 0x00000dfd, 0x00000e13, 0x00000e29, - 0x00000e3c, 0x00000e55, 0x00000e94, 0x00000ebe, - 0x00000ed6, 0x00000eea, 0x00000f0d, 0x00000f21, - 0x00000f37, 0x00000f49, 0x00000f6c, 
0x00000f7e, - 0x00000fa0, 0x00000fc8, 0x00000fe9, 0x00001004, - 0x0000102d, 0x0000104f, 0x00001081, 0x00001118, - // Entry 60 - 7F - 0x00001143, 0x0000117b, 0x000011a4, 0x000011dc, - 0x0000121d, 0x0000124d, 0x00001270, 0x00001298, - 0x000012fa, 0x00001306, 0x00001312, 0x00001322, - 0x00001332, 0x00001342, 0x00001369, 0x000013aa, - 0x000013ce, 0x000013df, 0x00001401, 0x0000142e, - 0x0000148d, 0x0000152a, 0x00001578, 0x00001592, - 0x000015b0, 0x00001610, -} // Size: 512 bytes - -const enData string = "" + // Size: 5648 bytes - "\x04\x00\x01 0\x02Use the arrow keys to navigate: ↓ ↑ → ←\x02This intera" + - "ctive tool creates a new miner actor and creates the basic configuration" + - " layer for it.\x02This process is partially idempotent. Once a new miner" + - " actor has been created and subsequent steps fail, the user need to run " + - "'curio config new-cluster < miner ID >' to finish the configuration.\x02" + - "This interactive tool migrates lotus-miner to Curio in 5 minutes.\x02Eac" + - "h step needs your confirmation and can be reversed. Press Ctrl+C to exit" + - " at any time.\x02Ctrl+C pressed in Terminal\x02I want to:\x02Migrate fro" + - "m existing Lotus-Miner\x02Create a new miner\x02Aborting remaining steps" + - ".\x02Lotus-Miner to Curio Migration.\x02Try the web interface with %[1]s" + - " for further guided improvements.\x02You can now migrate your market nod" + - "e (%[1]s), if applicable.\x02New Miner initialization complete.\x02Migra" + - "ting lotus-miner config.toml to Curio in-database configuration.\x02Erro" + - "r getting API: %[1]s\x02could not get API info for FullNode: %[1]w\x02Er" + - "ror getting token: %[1]s\x02Error saving config to layer: %[1]s. Abortin" + - "g Migration\x02The Curio team wants to improve the software you use. Tel" + - "l the team you're using `%[1]s`.\x02Select what you want to share with t" + - "he Curio team.\x02Individual Data: Miner ID, Curio version, chain (%[1]s" + - " or %[2]s). 
Signed.\x02Aggregate-Anonymous: version, chain, and Miner po" + - "wer (bucketed).\x02Hint: I am someone running Curio on whichever chain." + - "\x02Nothing.\x02Error getting miner power: %[1]s\x02Error marshalling me" + - "ssage: %[1]s\x02Error getting miner info: %[1]s\x02Error signing message" + - ": %[1]s\x02Error sending message: %[1]s\x04\x00\x01 .\x02Error sending m" + - "essage: Status %[1]s, Message:\x02Message sent.\x04\x00\x01 \x0f\x02Docu" + - "mentation:\x02The '%[1]s' layer stores common configuration. All curio i" + - "nstances can include it in their %[2]s argument.\x02You can add other la" + - "yers for per-machine configuration changes.\x02Filecoin %[1]s channels: " + - "%[2]s and %[3]s\x02Increase reliability using redundancy: start multiple" + - " machines with at-least the post layer: 'curio run --layers=post'\x02One" + - " database can serve multiple miner IDs: Run a migration for each lotus-m" + - "iner.\x02Please start (or restart) %[1]s now that database credentials a" + - "re in %[2]s.\x02Waiting for %[1]s to write sectors into Yugabyte.\x02Err" + - "or interpreting miner ID: %[1]s: ID: %[2]s\x02Error verifying sectors: %" + - "[1]s\x02The sectors are in the database. The database is ready for %[1]s" + - ".\x02Now shut down lotus-miner and lotus-worker and use run %[1]s instea" + - "d.\x02Press return to continue\x02Aborting migration.\x02Sectors verifie" + - "d. %[1]d sector locations found.\x02Connected to Yugabyte. Schema is cur" + - "rent.\x02Enabling Sector Indexing in the database.\x02Error encoding con" + - "fig.toml: %[1]s\x02Press return to update %[1]s with Yugabyte info. 
A Ba" + - "ckup file will be written to that folder before changes are made.\x02Err" + - "or expanding path: %[1]s\x02Error reading filemode of config.toml: %[1]s" + - "\x02Error creating backup file: %[1]s\x02Error reading config.toml: %[1]" + - "s\x02Error writing backup file: %[1]s\x02Error closing backup file: %[1]" + - "s\x02Error writing config.toml: %[1]s\x04\x00\x01 \x15\x02Restart Lotus " + - "Miner.\x02Connected to Yugabyte\x02To start, ensure your sealing pipelin" + - "e is drained and shut-down lotus-miner.\x02Select the location of your l" + - "otus-miner config directory?\x02Other\x02Enter the path to the configura" + - "tion directory used by %[1]s\x04\x00\x01 '\x02No path provided, abandoni" + - "ng migration\x02Cannot read the config.toml file in the provided directo" + - "ry, Error: %[1]s\x02Could not create repo from directory: %[1]s. Abortin" + - "g migration\x02Could not lock miner repo. Your miner must be stopped: %[" + - "1]s\x0a Aborting migration\x02Read Miner Config\x04\x00\x01\x0a\x15\x02S" + - "tep Complete: %[1]s\x02Initializing a new miner actor.\x02Enter the info" + - " to create a new miner\x02Owner Address: %[1]s\x02Worker Address: %[1]s" + - "\x02Sender Address: %[1]s\x02Sector Size: %[1]d\x02Confidence epochs: %[" + - "1]d\x02Continue to verify the addresses and create a new miner actor." 
+ - "\x04\x00\x01 %\x02Miner creation error occurred: %[1]s\x02Enter the owne" + - "r address\x02No address provided\x02Failed to parse the address: %[1]s" + - "\x02Enter %[1]s address\x02Enter the sector size\x02No value provided" + - "\x02Failed to parse sector size: %[1]s\x02Confidence epochs\x02Failed to" + - " parse confidence: %[1]s\x02Failed to create the miner actor: %[1]s\x02M" + - "iner %[1]s created successfully\x02Cannot reach the DB: %[1]s\x02Error c" + - "onnecting to full node API: %[1]s\x02Pre-initialization steps complete" + - "\x02Failed to generate random bytes for secret: %[1]s\x02Please do not r" + - "un guided-setup again as miner creation is not idempotent. You need to r" + - "un 'curio config new-cluster %[1]s' to finish the configuration\x02Faile" + - "d to get API info for FullNode: %[1]w\x02Failed to verify the auth token" + - " from daemon node: %[1]s\x02Failed to generate default config: %[1]s\x02" + - "Failed to insert 'base' config layer in database: %[1]s\x02Configuration" + - " 'base' was updated to include this miner's address\x02Failed to load ba" + - "se config from database: %[1]s\x02Failed to parse base config: %[1]s\x02" + - "Failed to regenerate base config: %[1]s\x02Enter the info to connect to " + - "your Yugabyte database installation (https://download.yugabyte.com/)\x02" + - "Host: %[1]s\x02Port: %[1]s\x02Username: %[1]s\x02Password: %[1]s\x02Data" + - "base: %[1]s\x02Continue to connect and update schema.\x04\x00\x01 <\x02D" + - "atabase config error occurred, abandoning migration: %[1]s\x02Enter the " + - "Yugabyte database host(s)\x02No host provided\x02Enter the Yugabyte data" + - "base %[1]s\x02Error connecting to Yugabyte database: %[1]s\x02Configurat" + - "ion 'base' was updated to include this miner's address (%[1]s) and its w" + - "allet setup.\x02Compare the configurations %[1]s to %[2]s. 
Changes betwe" + - "en the miner IDs other than wallet addreses should be a new, minimal lay" + - "er for runners that need it.\x02Configuration 'base' was created to rese" + - "mble this lotus-miner's config.toml .\x04\x00\x01 \x15\x02Layer %[1]s cr" + - "eated.\x04\x00\x01 \x19\x02To work with the config:\x02To run Curio: Wit" + - "h machine or cgroup isolation, use the command (with example layer selec" + - "tion):" - -var koIndex = []uint32{ // 122 elements - // Entry 0 - 1F - 0x00000000, 0x00000044, 0x000000c1, 0x000001c1, - 0x0000020e, 0x00000289, 0x000002aa, 0x000002bc, - 0x000002e5, 0x00000300, 0x00000325, 0x00000348, - 0x000003b2, 0x00000402, 0x00000428, 0x00000481, - 0x000004a0, 0x000004dc, 0x0000050c, 0x00000564, - 0x000005f0, 0x00000629, 0x0000067f, 0x000006bd, - 0x0000070b, 0x00000726, 0x00000760, 0x00000793, - 0x000007cd, 0x000007f7, 0x00000821, 0x00000863, - // Entry 20 - 3F - 0x00000887, 0x00000894, 0x0000091a, 0x0000096c, - 0x00000993, 0x00000a2f, 0x00000ac1, 0x00000b3c, - 0x00000b80, 0x00000bbe, 0x00000be5, 0x00000c50, - 0x00000c9d, 0x00000cc4, 0x00000cdf, 0x00000d2e, - 0x00000d6f, 0x00000daf, 0x00000df6, 0x00000e9c, - 0x00000ecc, 0x00000f1b, 0x00000f3e, 0x00000f5f, - 0x00000f82, 0x00000fa5, 0x00000fe3, 0x00001007, - 0x0000101d, 0x00001088, 0x000010d7, 0x000010de, - // Entry 40 - 5F - 0x00001126, 0x00001178, 0x000011d2, 0x0000123c, - 0x000012cd, 0x000012e5, 0x000012ff, 0x00001323, - 0x00001356, 0x0000136e, 0x00001386, 0x0000139e, - 0x000013b3, 0x000013cb, 0x00001422, 0x0000144d, - 0x00001465, 0x0000148c, 0x000014af, 0x000014c3, - 0x000014d8, 0x000014fc, 0x00001526, 0x00001537, - 0x0000155d, 0x00001583, 0x000015bc, 0x000015f4, - 0x0000162c, 0x0000164b, 0x00001697, 0x00001755, - // Entry 60 - 7F - 0x000017a1, 0x000017ef, 0x00001812, 0x0000186e, - 0x000018be, 0x00001913, 0x00001956, 0x00001995, - 0x00001a03, 0x00001a14, 0x00001a22, 0x00001a3a, - 0x00001a4e, 0x00001a68, 0x00001a92, 0x00001af5, - 0x00001b31, 0x00001b5b, 0x00001b93, 0x00001be7, - 
0x00001c60, 0x00001d1a, 0x00001d71, 0x00001da0, - 0x00001dc7, 0x00001e53, -} // Size: 512 bytes - -const koData string = "" + // Size: 7763 bytes - "\x04\x00\x01 ?\x02화살표 키를 사용하여 이동하세요: ↓ ↑ → ←\x02이 대화형 도구는 새로운 채굴자 액터를 생성" + - "하고 그에 대한 기본 구성 레이어를 생성합니다.\x02이 프로세스는 부분적으로 항등원적입니다. 새로운 채굴자 액터가 생성되었고" + - " 후속 단계가 실패하는 경우 사용자는 구성을 완료하기 위해 'curio config new-cluster < 채굴자 ID >'를 " + - "실행해야 합니다.\x02이 대화형 도구는 5분 안에 lotus-miner를 Curio로 이주합니다.\x02각 단계는 확인이 필" + - "요하며 되돌릴 수 있습니다. 언제든지 Ctrl+C를 눌러 종료할 수 있습니다.\x02터미널에서 Ctrl+C가 눌림\x02나는 " + - "원한다:\x02기존의 Lotus-Miner에서 이전하기\x02새로운 채굴자 생성\x02나머지 단계를 중단합니다.\x02Lotu" + - "s-Miner에서 Curio로 이주.\x02%[1]s를 사용하여 웹 인터페이스를 시도하고 더 나은 안내된 개선을 진행하세요." + - "\x02해당하는 경우 이제 시장 노드를 이주할 수 있습니다 (%[1]s).\x02새로운 채굴자 초기화 완료.\x02lotus-mi" + - "ner config.toml을 Curio의 데이터베이스 구성으로 이전 중입니다.\x02API 가져오기 오류: %[1]s\x02Fu" + - "llNode의 API 정보를 가져올 수 없습니다: %[1]w\x02토큰을 가져오는 중 오류 발생: %[1]s\x02레이어에 구성을" + - " 저장하는 중 오류 발생: %[1]s. 마이그레이션 중단\x02Curio 팀은 당신이 사용하는 소프트웨어를 개선하고자 합니다. 팀" + - "에게 `%[1]s`를 사용 중이라고 알려주세요.\x02Curio 팀과 공유하고 싶은 것을 선택하세요.\x02개별 데이터: 채굴" + - "자 ID, Curio 버전, 체인 (%[1]s 또는 %[2]s). 서명됨.\x02집계-익명: 버전, 체인, 및 채굴자 파워 (" + - "버킷).\x02힌트: 나는 어떤 체인에서든 Curio를 실행 중인 사람입니다.\x02아무것도 없습니다.\x02마이너 파워를 가" + - "져오는 중 오류 발생: %[1]s\x02메시지를 마샬하는 중 오류 발생: %[1]s\x02마이너 정보를 가져오는 중 오류 발생" + - ": %[1]s\x02메시지 서명 중 오류 발생: %[1]s\x02메시지 전송 중 오류 발생: %[1]s\x04\x00\x01 =" + - "\x02메시지 전송 중 오류 발생: 상태 %[1]s, 메시지:\x02메시지가 전송되었습니다.\x04\x00\x01 \x08\x02" + - "문서:\x02'%[1]s' 레이어에는 공통 구성이 저장됩니다. 모든 Curio 인스턴스는 %[2]s 인수에 포함시킬 수 있습니" + - "다.\x02기계별 구성 변경을 위해 다른 레이어를 추가할 수 있습니다.\x02Filecoin %[1]s 채널: %[2]s 및 " + - "%[3]s\x02신뢰성 향상을 위한 중복성 사용: 적어도 post 레이어를 사용하여 여러 대의 기계를 시작하십시오: 'curio " + - "run --layers=post'\x02한 개의 데이터베이스는 여러 광부 ID를 제공할 수 있습니다: 각 lotus-miner에 " + - "대해 마이그레이션을 실행하세요.\x02데이터베이스 자격 증명이 %[2]s에 입력되었으므로 지금 %[1]s을 시작하거나 다시 시" + - "작하세요.\x02%[1]s가 Yugabyte에 섹터를 기록하도록 대기 중입니다.\x02광부 ID를 해석하는 중 오류 발생: %" + - "[1]s: ID: %[2]s\x02섹터 확인 중 오류 발생: %[1]s\x02섹터가 데이터베이스에 있습니다. 
데이터베이스가 %[1" + - "]s를 위해 준비되었습니다.\x02이제 lotus-miner와 lotus-worker를 종료하고 %[1]s을 실행하세요.\x02계" + - "속하려면 리턴을 누르세요\x02마이그레이션 중단.\x02섹터가 확인되었습니다. %[1]d개의 섹터 위치를 찾았습니다.\x02Y" + - "ugabyte에 연결되었습니다. 스키마가 현재입니다.\x02데이터베이스에서 Sector Indexing을 활성화합니다.\x02co" + - "nfig.toml을 인코딩하는 중 오류가 발생했습니다: %[1]s\x02%[1]s을 Yugabyte 정보로 업데이트하려면 리턴 키" + - "를 누르세요. 변경 사항을 적용하기 전에 해당 폴더에 백업 파일이 작성됩니다.\x02경로를 확장하는 중 오류 발생: %[1]s" + - "\x02config.toml의 파일 모드를 읽는 중 오류가 발생했습니다: %[1]s\x02백업 파일 생성 오류: %[1]s\x02" + - "config.toml 읽기 오류: %[1]s\x02백업 파일 쓰기 오류: %[1]s\x02백업 파일 닫기 오류: %[1]s\x02" + - "config.toml을 쓰는 중 오류가 발생했습니다: %[1]s\x04\x00\x01 \x1f\x02로터스 마이너 재시작.\x02" + - "Yugabyte에 연결됨\x02시작하려면 밀봉 파이프라인이 비어 있고 lotus-miner가 종료되었는지 확인하세요.\x02로터스" + - " 마이너 구성 디렉토리의 위치를 선택하시겠습니까?\x02기타\x02%[1]s에서 사용하는 구성 디렉터리 경로를 입력하세요.\x04" + - "\x00\x01 M\x02경로가 제공되지 않았으므로 마이그레이션을 포기합니다\x02제공된 디렉토리에서 config.toml 파일을" + - " 읽을 수 없습니다. 오류: %[1]s\x02디렉토리에서 저장소를 생성할 수 없습니다: %[1]s. 마이그레이션을 중단합니다." + - "\x02광부 저장소를 잠금 해제할 수 없습니다. 귀하의 광부를 중지해야 합니다: %[1]s\x0a 마이그레이션을 중단합니다." + - "\x02마이너 구성 읽기\x04\x00\x01\x0a\x15\x02단계 완료: %[1]s\x02새 채굴자 액터 초기화 중.\x02" + - "새 채굴자를 생성하기 위한 정보 입력\x02소유자 주소: %[1]s\x02작업자 주소: %[1]s\x02송신자 주소: %[1]" + - "s\x02섹터 크기: %[1]d\x02신뢰 에포크: %[1]d\x02주소를 확인하고 새 채굴자 액터를 생성하려면 계속 진행하세요." + - "\x04\x00\x01 &\x02채굴자 생성 오류 발생: %[1]s\x02소유자 주소 입력\x02주소가 제공되지 않았습니다\x02" + - "주소 구문 분석 실패: %[1]s\x02%[1]s 주소 입력\x02섹터 크기 입력\x02값이 제공되지 않았습니다\x02섹터 크" + - "기 구문 분석 실패: %[1]s\x02신뢰 에포크\x02신뢰도 구문 분석 실패: %[1]s\x02채굴자 액터 생성 실패: %[" + - "1]s\x02%[1]s 채굴자가 성공적으로 생성되었습니다\x02데이터베이스에 연결할 수 없습니다: %[1]s\x02풀 노드 API" + - "에 연결하는 중 오류 발생: %[1]s\x02사전 초기화 단계 완료\x02비밀번호를 위한 랜덤 바이트 생성에 실패했습니다: %" + - "[1]s\x02마이너 생성은 idempotent하지 않으므로 가이드 설정을 다시 실행하지 마십시오. 
구성을 완료하려면 'curio" + - " config new-cluster %[1]s'를 실행해야 합니다.\x02FullNode에 대한 API 정보를 가져오는 데 실패했" + - "습니다: %[1]w\x02데몬 노드로부터 인증 토큰을 확인하는 중 오류 발생: %[1]s\x02기본 구성 생성 실패: %[1]" + - "s\x02데이터베이스에 'base' 구성 레이어를 삽입하는 데 실패했습니다: %[1]s\x02이 마이너 주소를 포함한 구성 'ba" + - "se'가 업데이트되었습니다.\x02데이터베이스에서 기본 구성을 로드하는 데 실패했습니다: %[1]s\x02기본 구성을 구문 분석하" + - "는 데 실패했습니다: %[1]s\x02기본 구성을 재생성하는 데 실패했습니다: %[1]s\x02Yugabyte 데이터베이스 설" + - "치에 연결할 정보를 입력하십시오 (https://download.yugabyte.com/)\x02호스트: %[1]s\x02포트" + - ": %[1]s\x02사용자 이름: %[1]s\x02비밀번호: %[1]s\x02데이터베이스: %[1]s\x02계속 연결 및 스키마 " + - "업데이트.\x04\x00\x01 ^\x02데이터베이스 구성 오류가 발생하여 마이그레이션을 포기합니다: %[1]s\x02Yuga" + - "byte 데이터베이스 호스트를 입력하십시오\x02호스트가 제공되지 않았습니다\x02Yugabyte 데이터베이스 %[1]s을 입력하" + - "십시오\x02Yugabyte 데이터베이스에 연결하는 중 오류가 발생했습니다: %[1]s\x02기본 설정 'base'가 이 마이" + - "너의 주소(%[1]s) 및 지갑 설정을 포함하도록 업데이트되었습니다.\x02구성 %[1]s를 %[2]s과 비교하세요. 지갑 주" + - "소 이외의 마이너 ID 사이의 변경 사항은 필요한 실행자를 위한 새로운 최소한의 레이어여야 합니다.\x02'base' 설정이 " + - "이 lotus-miner의 config.toml과 유사하게 만들어졌습니다.\x04\x00\x01 *\x02레이어 %[1]s가 " + - "생성되었습니다.\x04\x00\x01 \x22\x02구성 파일을 사용하려면:\x02Curio를 실행하려면: 기계 또는 cgro" + - "up 격리를 사용하여 다음 명령을 사용하세요 (예제 레이어 선택과 함께):" - -var zhIndex = []uint32{ // 122 elements - // Entry 0 - 1F - 0x00000000, 0x00000033, 0x0000008b, 0x00000134, - 0x0000017c, 0x000001cb, 0x000001e4, 0x000001f1, - 0x00000211, 0x0000022a, 0x00000240, 0x0000025d, - 0x000002a5, 0x000002e6, 0x00000302, 0x00000347, - 0x00000364, 0x0000038d, 0x000003ab, 0x000003e4, - 0x00000438, 0x00000465, 0x000004b4, 0x000004ef, - 0x00000524, 0x0000052e, 0x00000552, 0x00000570, - 0x00000594, 0x000005b2, 0x000005d0, 0x00000605, - // Entry 20 - 3F - 0x00000618, 0x00000627, 0x00000681, 0x000006be, - 0x000006e6, 0x00000745, 0x00000795, 0x000007e8, - 0x0000080e, 0x0000083b, 0x00000859, 0x00000895, - 0x000008d9, 0x000008e9, 0x000008f9, 0x0000092c, - 0x00000959, 0x0000097e, 0x000009a1, 0x00000a19, - 0x00000a37, 0x00000a66, 0x00000a8a, 0x00000aaf, - 0x00000ad3, 0x00000af7, 0x00000b1a, 0x00000b3a, - 0x00000b4f, 
0x00000b9a, 0x00000bca, 0x00000bd1, - // Entry 40 - 5F - 0x00000bfb, 0x00000c1f, 0x00000c63, 0x00000c95, - 0x00000cde, 0x00000cf1, 0x00000d0b, 0x00000d2a, - 0x00000d4f, 0x00000d67, 0x00000d7c, 0x00000d94, - 0x00000da8, 0x00000dbf, 0x00000df0, 0x00000e15, - 0x00000e2b, 0x00000e3b, 0x00000e55, 0x00000e69, - 0x00000e7c, 0x00000e89, 0x00000ea9, 0x00000eb9, - 0x00000ed6, 0x00000ef6, 0x00000f10, 0x00000f2d, - 0x00000f5e, 0x00000f77, 0x00000fa0, 0x0000102d, - // Entry 60 - 7F - 0x00001059, 0x00001094, 0x000010b4, 0x000010e5, - 0x00001118, 0x00001145, 0x00001166, 0x0000118c, - 0x000011e6, 0x000011f5, 0x00001204, 0x00001216, - 0x00001225, 0x00001237, 0x00001256, 0x0000128e, - 0x000012b3, 0x000012c3, 0x000012e1, 0x0000130d, - 0x0000135e, 0x000013e0, 0x00001427, 0x00001441, - 0x00001459, 0x000014b0, -} // Size: 512 bytes - -const zhData string = "" + // Size: 5296 bytes - "\x04\x00\x01 .\x02使用箭头键进行导航:↓ ↑ → ←\x02此交互式工具将创建一个新的矿工角色,并为其创建基本配置层。\x02" + - "该过程部分幂等。一旦创建了新的矿工角色,并且随后的步骤失败,用户需要运行 'curio config new-cluster < 矿工 ID" + - " >' 来完成配置。\x02这个交互式工具可以在5分钟内将lotus-miner迁移到Curio。\x02每一步都需要您的确认,并且可以撤销。随" + - "时按Ctrl+C退出。\x02在终端中按下Ctrl+C\x02我想要:\x02从现有的 Lotus-Miner 迁移\x02创建一个新的矿工" + - "\x02中止剩余步骤。\x02Lotus-Miner到Curio迁移。\x02尝试使用%[1]s的网络界面进行更进一步的指导性改进。\x02如果" + - "适用,您现在可以迁移您的市场节点(%[1]s)。\x02新矿工初始化完成。\x02将 lotus-miner config.toml 迁移到" + - " Curio 的数据库配置中。\x02获取 API 时出错:%[1]s\x02无法获取FullNode的API信息:%[1]w\x02获取令牌时" + - "出错:%[1]s\x02保存配置到层时出错:%[1]s。正在中止迁移\x02Curio 团队希望改进您使用的软件。告诉团队您正在使用 `%[" + - "1]s`。\x02选择您想与Curio团队分享的内容。\x02个人数据:矿工 ID,Curio 版本,链(%[1]s 或 %[2]s)。签名。" + - "\x02聚合-匿名:版本,链和矿工算力(分桶)。\x02提示:我是在任何链上运行 Curio 的人。\x02没有。\x02获取矿工功率时出错:%" + - "[1]s\x02整理消息时出错:%[1]s\x02获取矿工信息时出错:%[1]s\x02签署消息时出错:%[1]s\x02发送消息时出错:%[1" + - "]s\x04\x00\x01 0\x02发送消息时出错:状态%[1]s,消息:\x02消息已发送。\x04\x00\x01 \x0a\x02文档" + - ":\x02'%[1]s'层存储通用配置。所有Curio实例都可以在其%[2]s参数中包含它。\x02您可以添加其他层进行每台机器的配置更改。" + - "\x02Filecoin %[1]s 频道:%[2]s 和 %[3]s\x02通过冗余增加可靠性:使用至少后层启动多台机器:'curio run" + - " 
--layers=post'\x02一个数据库可以服务多个矿工ID:为每个lotus-miner运行迁移。\x02请立即启动(或重新启动)%[" + - "1]s,因为数据库凭据已在%[2]s中。\x02等待%[1]s将扇区写入Yugabyte。\x02解释矿工ID时出错:%[1]s:ID:%[2]" + - "s\x02验证扇区时出错:%[1]s\x02扇区在数据库中。数据库已准备好用于%[1]s。\x02现在关闭lotus-miner和lotus-w" + - "orker,改为使用%[1]s运行。\x02按回车继续\x02中止迁移。\x02扇区已验证。发现了%[1]d个扇区位置。\x02已连接到Yuga" + - "byte。模式是当前的。\x02在数据库中启用扇区索引。\x02编码config.toml时出错:%[1]s\x02按回车键更新 %[1]s 以" + - "包含 Yugabyte 信息。在进行更改之前,将在该文件夹中写入备份文件。\x02扩展路径时出错:%[1]s\x02读取config.tom" + - "l文件模式时出错:%[1]s\x02创建备份文件时出错:%[1]s\x02读取 config.toml 时出错:%[1]s\x02写入备份文件时" + - "出错:%[1]s\x02关闭备份文件时出错:%[1]s\x02写入config.toml时出错:%[1]s\x04\x00\x01 \x1b" + - "\x02重新启动Lotus Miner。\x02已连接到Yugabyte\x02开始之前,请确保您的密封管道已排空并关闭lotus-miner。" + - "\x02选择您的lotus-miner配置目录的位置?\x02其他\x02输入%[1]s使用的配置目录的路径\x04\x00\x01 \x1f" + - "\x02未提供路径,放弃迁移\x02无法读取提供的目录中的config.toml文件,错误:%[1]s\x02无法从目录创建repo:%[1]s" + - "。 中止迁移\x02无法锁定矿工repo。 您的矿工必须停止:%[1]s\x0a 中止迁移\x02读取矿工配置\x04\x00\x01" + - "\x0a\x15\x02步骤完成:%[1]s\x02初始化新的矿工角色。\x02输入创建新矿工所需的信息\x02所有者地址:%[1]s\x02工" + - "作地址:%[1]s\x02发送者地址:%[1]s\x02扇区大小: %[1]d\x02置信度时期: %[1]d\x02继续验证地址并创建新的" + - "矿工角色。\x04\x00\x01 \x02矿工创建错误发生: %[1]s\x02输入所有者地址\x02未提供地址\x02解析地址失败: " + - "%[1]s\x02输入 %[1]s 地址\x02输入扇区大小\x02未提供值\x02解析扇区大小失败: %[1]s\x02置信度时期\x02解析" + - "置信度失败: %[1]s\x02创建矿工角色失败: %[1]s\x02矿工 %[1]s 创建成功\x02无法访问数据库: %[1]s\x02" + - "连接到完整节点 API 时发生错误: %[1]s\x02预初始化步骤完成\x02生成密码的随机字节失败: %[1]s\x02请不要再次运行引" + - "导设置,因为矿工创建不是幂等的。 您需要运行 'curio config new-cluster %[1]s' 来完成配置。\x02无法获取" + - " FullNode 的 API 信息: %[1]w\x02无法验证来自守护进程节点的授权令牌: %[1]s\x02无法生成默认配置: %[1]s" + - "\x02无法将 'base' 配置层插入数据库: %[1]s\x02配置 'base' 已更新以包含此矿工的地址\x02从数据库加载基本配置失败" + - ":%[1]s\x02解析基本配置失败:%[1]s\x02重新生成基本配置失败: %[1]s\x02输入连接到您的Yugabyte数据库安装的" + - "信息(https://download.yugabyte.com/)\x02主机:%[1]s\x02端口:%[1]s\x02用户名:%[1]" + - "s\x02密码:%[1]s\x02数据库:%[1]s\x02继续连接和更新架构。\x04\x00\x01 3\x02发生数据库配置错误,放弃迁移" + - ":%[1]s\x02输入Yugabyte数据库主机(S)\x02未提供主机\x02输入Yugabyte数据库 %[1]s\x02连接到Yug" + - 
"abyte数据库时出错:%[1]s\x02'base'配置已更新,包括该矿工的地址(%[1]s)及其钱包设置。\x02比较配置%[1]s和%[2" + - "]s。矿工ID之间除了钱包地址的变化应该是需要的运行者的一个新的、最小的层。\x02'base'配置已创建,以类似于这个lotus-miner的" + - "config.toml。\x04\x00\x01 \x15\x02层%[1]s已创建。\x04\x00\x01 \x13\x02要使用配置:" + - "\x02运行Curio:使用机器或cgroup隔离,使用命令(附带示例层选择):" - - // Total table size 20243 bytes (19KiB); checksum: AB52E150 diff --git a/cmd/curio/internal/translations/knowns/main.go b/cmd/curio/internal/translations/knowns/main.go deleted file mode 100644 index a30a940657e..00000000000 --- a/cmd/curio/internal/translations/knowns/main.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "os" - "path" - - "github.com/samber/lo" -) - -func main() { - for _, arg := range os.Args { - handleKnowns(arg) - } -} - -func handleKnowns(pathStart string) { - outpath := path.Join(pathStart, "out.gotext.json") - b, err := os.ReadFile(outpath) - if err != nil { - fmt.Println("cannot open "+outpath+":", err) - return - } - type TMsg struct { - ID string `json:"id"` - Translation string `json:"translation"` - Message string `json:"message"` - Placeholder json.RawMessage `json:"placeholder"` - } - type Dataformat struct { - Language string `json:"language"` - Messages []TMsg `json:"messages"` - } - var outData Dataformat - err = json.NewDecoder(bytes.NewBuffer(b)).Decode(&outData) - if err != nil { - fmt.Println("cannot decode "+outpath+":", err) - return - } - - f, err := os.Open(path.Join(pathStart, "messages.gotext.json")) - if err != nil { - fmt.Println("cannot open "+path.Join(pathStart, "messages.gotext.json")+":", err) - return - } - defer func() { _ = f.Close() }() - - var msgData Dataformat - err = json.NewDecoder(f).Decode(&msgData) - if err != nil { - fmt.Println("cannot decode "+path.Join(pathStart, "messages.gotext.json")+":", err) - return - } - - knowns := map[string]string{} - for _, msg := range msgData.Messages { - knowns[msg.ID] = msg.Translation - } - - toTranslate := lo.Filter(outData.Messages, 
func(msg TMsg, _ int) bool { - _, ok := knowns[msg.ID] - return !ok - }) - - outData.Messages = toTranslate // drop the "done" messages - var outJSON bytes.Buffer - enc := json.NewEncoder(&outJSON) - enc.SetIndent(" ", " ") - err = enc.Encode(outData) - if err != nil { - fmt.Println("cannot encode "+outpath+":", err) - return - } - err = os.WriteFile(outpath, outJSON.Bytes(), 0644) - if err != nil { - fmt.Println("cannot write "+outpath+":", err) - return - } - fmt.Println("rearranged successfully") -} diff --git a/cmd/curio/internal/translations/locales/en/out.gotext.json b/cmd/curio/internal/translations/locales/en/out.gotext.json deleted file mode 100644 index 6046786220d..00000000000 --- a/cmd/curio/internal/translations/locales/en/out.gotext.json +++ /dev/null @@ -1,1636 +0,0 @@ -{ - "language": "en", - "messages": [ - { - "id": "Use the arrow keys to navigate: ↓ ↑ → ←", - "message": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translation": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "message": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translation": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "message": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translation": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "message": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translation": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "message": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "translation": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Ctrl+C pressed in Terminal", - "message": "Ctrl+C pressed in Terminal", - "translation": "Ctrl+C pressed in Terminal", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "I want to:", - "message": "I want to:", - "translation": "I want to:", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Migrate from existing Lotus-Miner", - "message": "Migrate from existing Lotus-Miner", - "translation": "Migrate from existing Lotus-Miner", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Create a new miner", - "message": "Create a new miner", - "translation": "Create a new miner", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Aborting remaining steps.", - "message": "Aborting remaining steps.", - "translation": "Aborting remaining steps.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]v", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": 
"err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Lotus-Miner to Curio Migration.", - "message": "Lotus-Miner to Curio Migration.", - "translation": "Lotus-Miner to Curio Migration.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "message": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "translation": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Rendercurio_run___layersgui", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "code.Render(\"curio run --layers=gui\")" - } - ], - "fuzzy": true - }, - { - "id": "You can now migrate your market node ({Boost}), if applicable.", - "message": "You can now migrate your market node ({Boost}), if applicable.", - "translation": "You can now migrate your market node ({Boost}), if applicable.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Boost", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"Boost\"" - } - ], - "fuzzy": true - }, - { - "id": "New Miner initialization complete.", - "message": "New Miner initialization complete.", - "translation": "New Miner initialization complete.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "message": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translation": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Error getting API: {Error}", - "message": "Error getting API: {Error}", - "translation": "Error getting API: 
{Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "could not get API info for FullNode: {Err}", - "message": "could not get API info for FullNode: {Err}", - "translation": "could not get API info for FullNode: {Err}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Err", - "string": "%[1]w", - "type": "error", - "underlyingType": "interface{Error() string}", - "argNum": 1, - "expr": "err" - } - ], - "fuzzy": true - }, - { - "id": "Error getting token: {Error}", - "message": "Error getting token: {Error}", - "translation": "Error getting token: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error saving config to layer: {Error}. Aborting Migration", - "message": "Error saving config to layer: {Error}. Aborting Migration", - "translation": "Error saving config to layer: {Error}. Aborting Migration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "message": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "translation": "The Curio team wants to improve the software you use. 
Tell the team you're using `{Curio}`.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Curio", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"curio\"" - } - ], - "fuzzy": true - }, - { - "id": "Select what you want to share with the Curio team.", - "message": "Select what you want to share with the Curio team.", - "translation": "Select what you want to share with the Curio team.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "message": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translation": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Mainnet", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"mainnet\"" - }, - { - "id": "Calibration", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"calibration\"" - } - ], - "fuzzy": true - }, - { - "id": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "message": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translation": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Hint: I am someone running Curio on whichever chain.", - "message": "Hint: I am someone running Curio on whichever chain.", - "translation": "Hint: I am someone running Curio on whichever chain.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Nothing.", - "message": "Nothing.", - "translation": "Nothing.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Error getting miner power: 
{Error}", - "message": "Error getting miner power: {Error}", - "translation": "Error getting miner power: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error marshalling message: {Error}", - "message": "Error marshalling message: {Error}", - "translation": "Error marshalling message: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error getting miner info: {Error}", - "message": "Error getting miner info: {Error}", - "translation": "Error getting miner info: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error signing message: {Error}", - "message": "Error signing message: {Error}", - "translation": "Error signing message: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error sending message: {Error}", - "message": "Error sending message: {Error}", - "translation": "Error sending message: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error sending message: Status {Status}, Message:", - "message": "Error sending message: Status {Status}, Message:", - "translation": 
"Error sending message: Status {Status}, Message:", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Status", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "resp.Status" - }, - { - "id": "Stringb", - "string": "%[2]v", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "string(b)" - } - ], - "fuzzy": true - }, - { - "id": "Message sent.", - "message": "Message sent.", - "translation": "Message sent.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Documentation:", - "message": "Documentation:", - "translation": "Documentation:", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "message": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "translation": "The '{Base}' layer stores common configuration. 
All curio instances can include it in their {__layers} argument.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Base", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"base\"" - }, - { - "id": "__layers", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"--layers\"" - } - ], - "fuzzy": true - }, - { - "id": "You can add other layers for per-machine configuration changes.", - "message": "You can add other layers for per-machine configuration changes.", - "translation": "You can add other layers for per-machine configuration changes.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "message": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "translation": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Slack", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"Slack\"" - }, - { - "id": "Fil_curio_help", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"#fil-curio-help\"" - }, - { - "id": "Fil_curio_dev", - "string": "%[3]s", - "type": "string", - "underlyingType": "string", - "argNum": 3, - "expr": "\"#fil-curio-dev\"" - } - ], - "fuzzy": true - }, - { - "id": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "message": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translation": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": 
"One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "message": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translation": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "message": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Lotus_miner", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"lotus-miner\"" - }, - { - "id": "Toml", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"config.toml\"" - } - ], - "fuzzy": true - }, - { - "id": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "message": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translation": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Lotus_miner", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"lotus-miner\"" - } - ], - "fuzzy": true - }, - { - "id": "Error interpreting miner ID: {Error}: ID: {String}", - "message": "Error interpreting miner ID: {Error}: ID: {String}", - "translation": "Error interpreting miner ID: {Error}: ID: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - }, - { - "id": "String", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - 
"argNum": 2, - "expr": "d.MinerID.String()" - } - ], - "fuzzy": true - }, - { - "id": "Error verifying sectors: {Error}", - "message": "Error verifying sectors: {Error}", - "translation": "Error verifying sectors: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "The sectors are in the database. The database is ready for {Curio}.", - "message": "The sectors are in the database. The database is ready for {Curio}.", - "translation": "The sectors are in the database. The database is ready for {Curio}.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Curio", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"Curio\"" - } - ], - "fuzzy": true - }, - { - "id": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "message": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translation": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Rendercurio_run", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "code.Render(\"curio run\")" - } - ], - "fuzzy": true - }, - { - "id": "Press return to continue", - "message": "Press return to continue", - "translation": "Press return to continue", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Aborting migration.", - "message": "Aborting migration.", - "translation": "Aborting migration.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Sectors verified. {I} sector locations found.", - "message": "Sectors verified. 
{I} sector locations found.", - "translation": "Sectors verified. {I} sector locations found.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "I", - "string": "%[1]d", - "type": "[]int", - "underlyingType": "[]int", - "argNum": 1, - "expr": "i" - } - ], - "fuzzy": true - }, - { - "id": "Connected to Yugabyte. Schema is current.", - "message": "Connected to Yugabyte. Schema is current.", - "translation": "Connected to Yugabyte. Schema is current.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enabling Sector Indexing in the database.", - "message": "Enabling Sector Indexing in the database.", - "translation": "Enabling Sector Indexing in the database.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Error encoding config.toml: {Error}", - "message": "Error encoding config.toml: {Error}", - "translation": "Error encoding config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "message": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "translation": "Press return to update {Toml} with Yugabyte info. 
A Backup file will be written to that folder before changes are made.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Toml", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"config.toml\"" - } - ], - "fuzzy": true - }, - { - "id": "Error expanding path: {Error}", - "message": "Error expanding path: {Error}", - "translation": "Error expanding path: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error reading filemode of config.toml: {Error}", - "message": "Error reading filemode of config.toml: {Error}", - "translation": "Error reading filemode of config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error creating backup file: {Error}", - "message": "Error creating backup file: {Error}", - "translation": "Error creating backup file: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error reading config.toml: {Error}", - "message": "Error reading config.toml: {Error}", - "translation": "Error reading config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error writing backup file: {Error}", - "message": "Error writing backup file: {Error}", - "translation": "Error writing backup 
file: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error closing backup file: {Error}", - "message": "Error closing backup file: {Error}", - "translation": "Error closing backup file: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error writing config.toml: {Error}", - "message": "Error writing config.toml: {Error}", - "translation": "Error writing config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Restart Lotus Miner.", - "message": "Restart Lotus Miner.", - "translation": "Restart Lotus Miner.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Connected to Yugabyte", - "message": "Connected to Yugabyte", - "translation": "Connected to Yugabyte", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "message": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translation": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Select the location of your lotus-miner config directory?", - "message": "Select the location of your lotus-miner config directory?", - "translation": "Select the location of your lotus-miner config directory?", - "translatorComment": "Copied from source.", - 
"fuzzy": true - }, - { - "id": "Other", - "message": "Other", - "translation": "Other", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enter the path to the configuration directory used by {Lotus_miner}", - "message": "Enter the path to the configuration directory used by {Lotus_miner}", - "translation": "Enter the path to the configuration directory used by {Lotus_miner}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Lotus_miner", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"lotus-miner\"" - } - ], - "fuzzy": true - }, - { - "id": "No path provided, abandoning migration", - "message": "No path provided, abandoning migration", - "translation": "No path provided, abandoning migration", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "message": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translation": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Could not create repo from directory: {Error}. Aborting migration", - "message": "Could not create repo from directory: {Error}. Aborting migration", - "translation": "Could not create repo from directory: {Error}. Aborting migration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Could not lock miner repo. 
Your miner must be stopped: {Error}\n Aborting migration", - "message": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "translation": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Read Miner Config", - "message": "Read Miner Config", - "translation": "Read Miner Config", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Step Complete: {Step}", - "message": "Step Complete: {Step}", - "translation": "Step Complete: {Step}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Step", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "step" - } - ], - "fuzzy": true - }, - { - "id": "Initializing a new miner actor.", - "message": "Initializing a new miner actor.", - "translation": "Initializing a new miner actor.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enter the info to create a new miner", - "message": "Enter the info to create a new miner", - "translation": "Enter the info to create a new miner", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Owner Address: {String}", - "message": "Owner Address: {String}", - "translation": "Owner Address: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.owner.String()" - } - ], - "fuzzy": true - }, - { - "id": "Worker Address: {String}", - "message": "Worker Address: {String}", - "translation": "Worker Address: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - 
"id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.worker.String()" - } - ], - "fuzzy": true - }, - { - "id": "Sender Address: {String}", - "message": "Sender Address: {String}", - "translation": "Sender Address: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.sender.String()" - } - ], - "fuzzy": true - }, - { - "id": "Sector Size: {Ssize}", - "message": "Sector Size: {Ssize}", - "translation": "Sector Size: {Ssize}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Ssize", - "string": "%[1]d", - "type": "github.com/filecoin-project/go-state-types/abi.SectorSize", - "underlyingType": "uint64", - "argNum": 1, - "expr": "d.ssize" - } - ], - "fuzzy": true - }, - { - "id": "Confidence epochs: {Confidence}", - "message": "Confidence epochs: {Confidence}", - "translation": "Confidence epochs: {Confidence}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Confidence", - "string": "%[1]d", - "type": "uint64", - "underlyingType": "uint64", - "argNum": 1, - "expr": "d.confidence" - } - ], - "fuzzy": true - }, - { - "id": "Continue to verify the addresses and create a new miner actor.", - "message": "Continue to verify the addresses and create a new miner actor.", - "translation": "Continue to verify the addresses and create a new miner actor.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Miner creation error occurred: {Error}", - "message": "Miner creation error occurred: {Error}", - "translation": "Miner creation error occurred: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - 
"id": "Enter the owner address", - "message": "Enter the owner address", - "translation": "Enter the owner address", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "No address provided", - "message": "No address provided", - "translation": "No address provided", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to parse the address: {Error}", - "message": "Failed to parse the address: {Error}", - "translation": "Failed to parse the address: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Enter {Stringworker_senderi_1} address", - "message": "Enter {Stringworker_senderi_1} address", - "translation": "Enter {Stringworker_senderi_1} address", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Stringworker_senderi_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"worker\", \"sender\"}[i-1]" - } - ], - "fuzzy": true - }, - { - "id": "Enter the sector size", - "message": "Enter the sector size", - "translation": "Enter the sector size", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "No value provided", - "message": "No value provided", - "translation": "No value provided", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to parse sector size: {Error}", - "message": "Failed to parse sector size: {Error}", - "translation": "Failed to parse sector size: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Confidence epochs", - "message": "Confidence 
epochs", - "translation": "Confidence epochs", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to parse confidence: {Error}", - "message": "Failed to parse confidence: {Error}", - "translation": "Failed to parse confidence: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to create the miner actor: {Error}", - "message": "Failed to create the miner actor: {Error}", - "translation": "Failed to create the miner actor: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Miner {String} created successfully", - "message": "Miner {String} created successfully", - "translation": "Miner {String} created successfully", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "miner.String()" - } - ], - "fuzzy": true - }, - { - "id": "Cannot reach the DB: {Error}", - "message": "Cannot reach the DB: {Error}", - "translation": "Cannot reach the DB: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error connecting to full node API: {Error}", - "message": "Error connecting to full node API: {Error}", - "translation": "Error connecting to full node API: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - 
"argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Pre-initialization steps complete", - "message": "Pre-initialization steps complete", - "translation": "Pre-initialization steps complete", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to generate random bytes for secret: {Error}", - "message": "Failed to generate random bytes for secret: {Error}", - "translation": "Failed to generate random bytes for secret: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "message": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "translation": "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster {String}' to finish the configuration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.MinerID.String()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to get API info for FullNode: {Err}", - "message": "Failed to get API info for FullNode: {Err}", - "translation": "Failed to get API info for FullNode: {Err}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Err", - "string": "%[1]w", - "type": "error", - "underlyingType": "interface{Error() string}", - "argNum": 1, - "expr": "err" - } - ], - "fuzzy": true - }, - { - "id": "Failed to verify the auth token from daemon node: {Error}", - "message": "Failed to verify the auth token from daemon node: {Error}", - "translation": "Failed to verify the auth token from daemon node: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to generate default config: {Error}", - "message": "Failed to generate default config: {Error}", - "translation": "Failed to generate default config: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to insert 'base' config layer in database: {Error}", - "message": "Failed to insert 'base' config layer in database: {Error}", - "translation": "Failed to insert 'base' config layer in database: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": 
"err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Configuration 'base' was updated to include this miner's address", - "message": "Configuration 'base' was updated to include this miner's address", - "translation": "Configuration 'base' was updated to include this miner's address", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to load base config from database: {Error}", - "message": "Failed to load base config from database: {Error}", - "translation": "Failed to load base config from database: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to parse base config: {Error}", - "message": "Failed to parse base config: {Error}", - "translation": "Failed to parse base config: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to regenerate base config: {Error}", - "message": "Failed to regenerate base config: {Error}", - "translation": "Failed to regenerate base config: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "message": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translation": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - 
"id": "Host: {Hosts_}", - "message": "Host: {Hosts_}", - "translation": "Host: {Hosts_}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Hosts_", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "strings.Join(harmonyCfg.Hosts, \",\")" - } - ], - "fuzzy": true - }, - { - "id": "Port: {Port}", - "message": "Port: {Port}", - "translation": "Port: {Port}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Port", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Port" - } - ], - "fuzzy": true - }, - { - "id": "Username: {Username}", - "message": "Username: {Username}", - "translation": "Username: {Username}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Username", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Username" - } - ], - "fuzzy": true - }, - { - "id": "Password: {Password}", - "message": "Password: {Password}", - "translation": "Password: {Password}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Password", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Password" - } - ], - "fuzzy": true - }, - { - "id": "Database: {Database}", - "message": "Database: {Database}", - "translation": "Database: {Database}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Database", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Database" - } - ], - "fuzzy": true - }, - { - "id": "Continue to connect and update schema.", - "message": "Continue to connect and update schema.", - "translation": "Continue to connect and update schema.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Database config error occurred, 
abandoning migration: {Error}", - "message": "Database config error occurred, abandoning migration: {Error}", - "translation": "Database config error occurred, abandoning migration: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Enter the Yugabyte database host(s)", - "message": "Enter the Yugabyte database host(s)", - "translation": "Enter the Yugabyte database host(s)", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "No host provided", - "message": "No host provided", - "translation": "No host provided", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "message": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translation": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Stringport_username_password_databasei_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"port\", \"username\", \"password\", \"database\"}[i-1]" - } - ], - "fuzzy": true - }, - { - "id": "Error connecting to Yugabyte database: {Error}", - "message": "Error connecting to Yugabyte database: {Error}", - "translation": "Error connecting to Yugabyte database: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "message": "Configuration 'base' was updated to include this 
miner's address ({MinerAddress}) and its wallet setup.", - "translation": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "MinerAddress", - "string": "%[1]s", - "type": "github.com/filecoin-project/go-address.Address", - "underlyingType": "struct{str string}", - "argNum": 1, - "expr": "minerAddress" - } - ], - "fuzzy": true - }, - { - "id": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "message": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translation": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Base", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"base\"" - }, - { - "id": "MinerAddresses0", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"mig-\" + curioCfg.Addresses[0].MinerAddresses[0]" - } - ], - "fuzzy": true - }, - { - "id": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "message": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translation": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Layer {LayerName} created.", - "message": "Layer {LayerName} created.", - "translation": "Layer {LayerName} created.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": 
"LayerName", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "layerName" - } - ], - "fuzzy": true - }, - { - "id": "To work with the config:", - "message": "To work with the config:", - "translation": "To work with the config:", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "message": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translation": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translatorComment": "Copied from source.", - "fuzzy": true - } - ] -} \ No newline at end of file diff --git a/cmd/curio/internal/translations/locales/ko/messages.gotext.json b/cmd/curio/internal/translations/locales/ko/messages.gotext.json deleted file mode 100644 index bac1140e8e0..00000000000 --- a/cmd/curio/internal/translations/locales/ko/messages.gotext.json +++ /dev/null @@ -1,1106 +0,0 @@ -{ - "language": "ko", - "messages": [ - { - "id": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "message": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "translation": "이 대화형 도구는 Curio 마이그레이션 과정을 안내합니다.\n언제든지 종료하려면 Ctrl+C를 누르십시오." - }, - { - "id": "This tool confirms each action it does.", - "message": "This tool confirms each action it does.", - "translation": "이 도구는 수행하는 각 작업을 확인합니다." - }, - { - "id": "Ctrl+C pressed in Terminal", - "message": "Ctrl+C pressed in Terminal", - "translation": "터미널에서 Ctrl+C가 눌림" - }, - { - "id": "Verifying Sectors exist in Yugabyte.", - "message": "Verifying Sectors exist in Yugabyte.", - "translation": "Yugabyte에 섹터가 존재하는지 확인 중." 
- }, - { - "id": "Error verifying sectors: {Error}", - "message": "Error verifying sectors: {Error}", - "translation": "섹터 확인 중 오류 발생: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Sectors verified. {I} sectors found.", - "message": "Sectors verified. {I} sectors found.", - "translation": "섹터가 확인되었습니다. {I}개의 섹터가 발견되었습니다.", - "placeholders": [ - { - "id": "I", - "string": "%[1]d", - "type": "[]int", - "underlyingType": "[]int", - "argNum": 1, - "expr": "i" - } - ] - }, - { - "id": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "message": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "translation": "로터스 마이너의 config.toml에서 데이터베이스 정보를 제거하지 마십시오. 두 번의 PoSt를 피하기 위함입니다." - }, - { - "id": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "message": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translation": "Yugabyte 데이터베이스 설치에 연결할 정보를 입력하십시오 (https://download.yugabyte.com/)" - }, - { - "id": "Host: {Hosts_}", - "message": "Host: {Hosts_}", - "translation": "호스트: {Hosts_}", - "placeholders": [ - { - "id": "Hosts_", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "strings.Join(harmonycfg.Hosts, \",\")" - } - ] - }, - { - "id": "Port: {Port}", - "message": "Port: {Port}", - "translation": "포트: {Port}", - "placeholders": [ - { - "id": "Port", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Port" - } - ] - }, - { - "id": "Username: {Username}", - "message": "Username: {Username}", - "translation": "사용자 이름: {Username}", - "placeholders": [ - { - "id": "Username", - "string": "%[1]s", - "type": "string", - 
"underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Username" - } - ] - }, - { - "id": "Password: {Password}", - "message": "Password: {Password}", - "translation": "비밀번호: {Password}", - "placeholders": [ - { - "id": "Password", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Password" - } - ] - }, - { - "id": "Database: {Database}", - "message": "Database: {Database}", - "translation": "데이터베이스: {Database}", - "placeholders": [ - { - "id": "Database", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Database" - } - ] - }, - { - "id": "Continue to connect and update schema.", - "message": "Continue to connect and update schema.", - "translation": "계속 연결 및 스키마 업데이트." - }, - { - "id": "Database config error occurred, abandoning migration: {Error}", - "message": "Database config error occurred, abandoning migration: {Error}", - "translation": "데이터베이스 구성 오류가 발생하여 마이그레이션을 포기합니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Enter the Yugabyte database host(s)", - "message": "Enter the Yugabyte database host(s)", - "translation": "Yugabyte 데이터베이스 호스트를 입력하십시오" - }, - { - "id": "No host provided", - "message": "No host provided", - "translation": "호스트가 제공되지 않았습니다" - }, - { - "id": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "message": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translation": "Yugabyte 데이터베이스 {Stringport_username_password_databasei_1}을 입력하십시오", - "placeholders": [ - { - "id": "Stringport_username_password_databasei_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"port\", \"username\", \"password\", \"database\"}[i-1]" - } - ] - }, - { - "id": "No value provided", - 
"message": "No value provided", - "translation": "값이 제공되지 않았습니다" - }, - { - "id": "Error connecting to Yugabyte database: {Error}", - "message": "Error connecting to Yugabyte database: {Error}", - "translation": "Yugabyte 데이터베이스에 연결하는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Connected to Yugabyte. Schema is current.", - "message": "Connected to Yugabyte. Schema is current.", - "translation": "Yugabyte에 연결되었습니다. 스키마가 현재입니다." - }, - { - "id": "Error encoding config.toml: {Error}", - "message": "Error encoding config.toml: {Error}", - "translation": "config.toml을 인코딩하는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error reading filemode of config.toml: {Error}", - "message": "Error reading filemode of config.toml: {Error}", - "translation": "config.toml의 파일 모드를 읽는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error writing config.toml: {Error}", - "message": "Error writing config.toml: {Error}", - "translation": "config.toml을 쓰는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Restart Lotus Miner.", - "message": "Restart Lotus Miner.", - "translation": "로터스 마이너 재시작." 
- }, - { - "id": "Connected to Yugabyte", - "message": "Connected to Yugabyte", - "translation": "Yugabyte에 연결됨" - }, - { - "id": "Select the location of your lotus-miner config directory?", - "message": "Select the location of your lotus-miner config directory?", - "translation": "로터스 마이너 구성 디렉토리의 위치를 선택하시겠습니까?" - }, - { - "id": "Other", - "message": "Other", - "translation": "기타" - }, - { - "id": "Enter the path to the configuration directory used by lotus-miner", - "message": "Enter the path to the configuration directory used by lotus-miner", - "translation": "로터스 마이너에서 사용하는 구성 디렉토리의 경로를 입력하십시오" - }, - { - "id": "No path provided, abandoning migration", - "message": "No path provided, abandoning migration", - "translation": "경로가 제공되지 않았으므로 마이그레이션을 포기합니다" - }, - { - "id": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "message": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translation": "제공된 디렉토리에서 config.toml 파일을 읽을 수 없습니다. 오류: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Read Miner Config", - "message": "Read Miner Config", - "translation": "마이너 구성 읽기" - }, - { - "id": "Completed Step: {Step}", - "message": "Completed Step: {Step}", - "translation": "단계 완료: {Step}", - "placeholders": [ - { - "id": "Step", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "step" - } - ] - }, - { - "id": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translation": "이 대화형 도구는 5분 안에 lotus-miner를 Curio로 이주합니다.", - "message": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "placeholder": null - }, - { - "id": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "translation": "각 단계는 확인이 필요하며 되돌릴 수 있습니다. 
언제든지 Ctrl+C를 눌러 종료할 수 있습니다.", - "message": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "placeholder": null - }, - { - "id": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translation": "화살표 키를 사용하여 이동하세요: ↓ ↑ → ←", - "message": "Use the arrow keys to navigate: ↓ ↑ → ←", - "placeholder": null - }, - { - "id": "Lotus-Miner to Curio Migration.", - "translation": "Lotus-Miner에서 Curio로 이주.", - "message": "Lotus-Miner to Curio Migration.", - "placeholder": null - }, - { - "id": "Try the web interface with for further guided improvements.", - "translation": "더 나은 안내를 위해 웹 인터페이스를 사용해보세요.", - "message": "Try the web interface with for further guided improvements.", - "placeholder": null - }, - { - "id": "You can now migrate your market node ({Boost}), if applicable.", - "translation": "해당하는 경우 이제 시장 노드를 이주할 수 있습니다 ({Boost}).", - "message": "You can now migrate your market node ({Boost}), if applicable.", - "placeholder": null - }, - { - "id": "Migrating config.toml to database.", - "translation": "config.toml을 데이터베이스로 이주 중입니다.", - "message": "Migrating config.toml to database.", - "placeholder": null - }, - { - "id": "Error reading from database: {Error}. Aborting Migration.", - "translation": "데이터베이스에서 읽는 중 오류 발생: {Error}. 마이그레이션 중단.", - "message": "Error reading from database: {Error}. Aborting Migration.", - "placeholder": null - }, - { - "id": "cannot read API: {Error}. Aborting Migration", - "translation": "API를 읽을 수 없습니다: {Error}. 마이그레이션 중단", - "message": "cannot read API: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Error saving config to layer: {Error}. Aborting Migration", - "translation": "레이어에 구성을 저장하는 중 오류 발생: {Error}. 마이그레이션 중단", - "message": "Error saving config to layer: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Protocol Labs wants to improve the software you use. Tell the team you're using Curio.", - "translation": "Protocol Labs는 당신이 사용하는 소프트웨어를 개선하고 싶어합니다. 
Curio를 사용 중이라고 팀에 알려주세요.", - "message": "Protocol Labs wants to improve the software you use. Tell the team you're using Curio.", - "placeholder": null - }, - { - "id": "Select what you want to share with the Curio team.", - "translation": "Curio 팀과 공유하고 싶은 것을 선택하세요.", - "message": "Select what you want to share with the Curio team.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "translation": "개별 데이터: 마이너 ID, Curio 버전, 네트워크 ({Mainnet} 또는 {Testnet}). 서명됨.", - "message": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "translation": "집계-익명: 버전, 네트워크, 그리고 마이너 파워 (버킷).", - "message": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on net.", - "translation": "힌트: 네트워크에서 Curio를 실행 중인 사람입니다.", - "message": "Hint: I am someone running Curio on net.", - "placeholder": null - }, - { - "id": "Nothing.", - "translation": "아무것도 없습니다.", - "message": "Nothing.", - "placeholder": null - }, - { - "id": "Aborting remaining steps.", - "translation": "나머지 단계를 중단합니다.", - "message": "Aborting remaining steps.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error}", - "translation": "로터스 노드에 연결하는 중 오류 발생: {Error}", - "message": "Error connecting to lotus node: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner power: {Error}", - "translation": "마이너 파워를 가져오는 중 오류 발생: {Error}", - "message": "Error getting miner power: {Error}", - "placeholder": null - }, - { - "id": "Error marshalling message: {Error}", - "translation": "메시지를 마샬하는 중 오류 발생: {Error}", - "message": "Error marshalling message: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner info: {Error}", - "translation": "마이너 정보를 가져오는 중 오류 발생: {Error}", - "message": 
"Error getting miner info: {Error}", - "placeholder": null - }, - { - "id": "Error signing message: {Error}", - "translation": "메시지 서명 중 오류 발생: {Error}", - "message": "Error signing message: {Error}", - "placeholder": null - }, - { - "id": "Error sending message: {Error}", - "translation": "메시지 전송 중 오류 발생: {Error}", - "message": "Error sending message: {Error}", - "placeholder": null - }, - { - "id": "Error sending message: Status {Status}, Message:", - "translation": "메시지 전송 중 오류 발생: 상태 {Status}, 메시지:", - "message": "Error sending message: Status {Status}, Message:", - "placeholder": null - }, - { - "id": "Message sent.", - "translation": "메시지가 전송되었습니다.", - "message": "Message sent.", - "placeholder": null - }, - { - "id": "Documentation:", - "translation": "문서:", - "message": "Documentation:", - "placeholder": null - }, - { - "id": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "translation": "'{Base}' 레이어에는 공통 구성이 저장됩니다. 모든 Curio 인스턴스는 {__layers} 인수에 포함시킬 수 있습니다.", - "message": "The '{Base}' layer stores common configuration. 
All curio instances can include it in their {__layers} argument.", - "placeholder": null - }, - { - "id": "You can add other layers for per-machine configuration changes.", - "translation": "기계별 구성 변경을 위해 다른 레이어를 추가할 수 있습니다.", - "message": "You can add other layers for per-machine configuration changes.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "translation": "도움을 위해 Filecoin {Slack}의 {Fil_curio_help}에 가입하세요.", - "message": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "translation": "개발과 피드백을 따르려면 Filecoin {Slack}의 {Fil_curio_dev}에 가입하세요!", - "message": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "placeholder": null - }, - { - "id": "Want PoST redundancy? Run many Curio instances with the '{Post}' layer.", - "translation": "PoST 중복성이 필요하신가요? '{Post}' 레이어와 함께 여러 Curio 인스턴스를 실행하세요.", - "message": "Want PoST redundancy? Run many Curio instances with the '{Post}' layer.", - "placeholder": null - }, - { - "id": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "translation": "브라우저를 웹 GUI로 이동하여 {Boost} 및 고급 기능으로 설정을 완료하세요.", - "message": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "placeholder": null - }, - { - "id": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. The cluster will serve all Miner IDs.", - "translation": "여러 마이너 ID가 있는 SP의 경우 각 lotus-miner당 1회 마이그레이션을 동일한 1개의 데이터베이스로 모두 실행하세요. 클러스터는 모든 마이너 ID를 제공합니다.", - "message": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. 
The cluster will serve all Miner IDs.", - "placeholder": null - }, - { - "id": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "데이터베이스 자격 증명이 {Toml}에 있으므로 이제 {Lotus_miner}를 시작하세요.", - "message": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translation": "{Lotus_miner}가 Yugabyte에 섹터를 기록하도록 대기 중입니다.", - "message": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "placeholder": null - }, - { - "id": "The sectors are in the database. The database is ready for {Curio}.", - "translation": "섹터가 데이터베이스에 있습니다. 데이터베이스가 {Curio}를 위해 준비되었습니다.", - "message": "The sectors are in the database. The database is ready for {Curio}.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and move the systems to {Curio}.", - "translation": "이제 lotus-miner를 종료하고 시스템을 {Curio}로 이동하세요.", - "message": "Now shut down lotus-miner and move the systems to {Curio}.", - "placeholder": null - }, - { - "id": "Press return to continue", - "translation": "계속하려면 리턴을 누르세요", - "message": "Press return to continue", - "placeholder": null - }, - { - "id": "Aborting migration.", - "translation": "마이그레이션 중단.", - "message": "Aborting migration.", - "placeholder": null - }, - { - "id": "Sectors verified. {I} sector locations found.", - "translation": "섹터가 확인되었습니다. {I}개의 섹터 위치를 찾았습니다.", - "message": "Sectors verified. {I} sector locations found.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. Backup the file now.", - "translation": "{Toml}을 Yugabyte 정보로 업데이트하려면 리턴을 누르세요. 지금 파일을 백업하세요.", - "message": "Press return to update {Toml} with Yugabyte info. 
Backup the file now.", - "placeholder": null - }, - { - "id": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translation": "시작하려면 밀봉 파이프라인이 비어 있고 lotus-miner가 종료되었는지 확인하세요.", - "message": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "placeholder": null - }, - { - "id": "Enter the path to the configuration directory used by {Lotus_miner}", - "translation": "{Lotus_miner}에서 사용하는 구성 디렉터리 경로를 입력하세요.", - "message": "Enter the path to the configuration directory used by {Lotus_miner}", - "placeholder": null - }, - { - "id": "Step Complete: {Step}", - "translation": "단계 완료: {Step}", - "message": "Step Complete: {Step}", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - "translation": "이 마이너의 주소와 지갑 설정을 포함하도록 구성 'base'가 업데이트되었습니다.", - "message": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - "placeholder": null - }, - { - "id": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translation": "구성 {Base}를 {MinerAddresses0}과 비교하세요. 지갑 주소 이외의 마이너 ID 사이의 변경 사항은 필요한 실행자를 위한 새로운 최소한의 레이어여야 합니다.", - "message": "Compare the configurations {Base} to {MinerAddresses0}. 
Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "translation": "이 마이너의 주소와 지갑 설정을 포함하도록 구성 'base'가 생성되었습니다.", - "message": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "placeholder": null - }, - { - "id": "Layer {LayerName} created.", - "translation": "레이어 {LayerName}가 생성되었습니다.", - "message": "Layer {LayerName} created.", - "placeholder": null - }, - { - "id": "To work with the config: \\n", - "translation": "구성을 사용하려면: \\n", - "message": "To work with the config: \\n", - "placeholder": null - }, - { - "id": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translation": "Curio를 실행하려면: 기계 또는 cgroup 격리를 사용하여 다음 명령을 사용하세요 (예제 레이어 선택과 함께):", - "message": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "placeholder": null - }, - { - "id": "Try the web interface with {__layersgui} for further guided improvements.", - "translation": "더 많은 안내를 위해 {__layersgui}를 사용하여 웹 인터페이스를 시도하세요.", - "message": "Try the web interface with {__layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error} {Error_1}", - "translation": "lotus 노드에 연결하는 중 오류 발생: {Error} {Error_1}", - "message": "Error connecting to lotus node: {Error} {Error_1}", - "placeholder": null - }, - { - "id": "could not get API info for FullNode: {Err}", - "translation": "FullNode의 API 정보를 가져올 수 없습니다: {Err}", - "message": "could not get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Error getting token: {Error}", - "translation": "토큰을 가져오는 중 오류 발생: {Error}", - "message": "Error getting token: {Error}", - "placeholder": null - }, - { - "id": "Filecoin {Slack} channels: {Fil_curio_help} 
and {Fil_curio_dev}", - "translation": "Filecoin {Slack} 채널: {Fil_curio_help} 및 {Fil_curio_dev}", - "message": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "placeholder": null - }, - { - "id": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "translation": "'{Post}' 레이어로 여러 Curio 인스턴스를 시작하여 중복성을 확보하세요.", - "message": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "placeholder": null - }, - { - "id": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translation": "한 개의 데이터베이스는 여러 광부 ID를 제공할 수 있습니다: 각 lotus-miner에 대해 마이그레이션을 실행하세요.", - "message": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "placeholder": null - }, - { - "id": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "데이터베이스 자격 증명이 {Toml}에 입력되었으므로 지금 {Lotus_miner}을 시작하거나 다시 시작하세요.", - "message": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Error interpreting miner ID: {Error}: ID: {String}", - "translation": "광부 ID를 해석하는 중 오류 발생: {Error}: ID: {String}", - "message": "Error interpreting miner ID: {Error}: ID: {String}", - "placeholder": null - }, - { - "id": "Enabling Sector Indexing in the database.", - "translation": "데이터베이스에서 Sector Indexing을 활성화합니다.", - "message": "Enabling Sector Indexing in the database.", - "placeholder": null - }, - { - "id": "Error expanding path: {Error}", - "translation": "경로를 확장하는 중 오류 발생: {Error}", - "message": "Error expanding path: {Error}", - "placeholder": null - }, - { - "id": "Could not create repo from directory: {Error}. Aborting migration", - "translation": "디렉토리에서 저장소를 생성할 수 없습니다: {Error}. 마이그레이션을 중단합니다.", - "message": "Could not create repo from directory: {Error}. Aborting migration", - "placeholder": null - }, - { - "id": "Could not lock miner repo. 
Your miner must be stopped: {Error}\n Aborting migration", - "translation": "광부 저장소를 잠금 해제할 수 없습니다. 귀하의 광부를 중지해야 합니다: {Error}\n 마이그레이션을 중단합니다.", - "message": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "placeholder": null - }, - { - "id": "To work with the config:", - "translation": "구성 파일을 사용하려면:", - "message": "To work with the config:", - "placeholder": null - }, - { - "id": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translation": "이 대화형 도구는 새로운 채굴자 액터를 생성하고 그에 대한 기본 구성 레이어를 생성합니다.", - "message": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "translation": "이 프로세스는 부분적으로 idempotent합니다. 새로운 채굴자 액터가 생성되었고 후속 단계가 실패하면 사용자는 구성을 완료하기 위해 'curio config new-cluster {Arg_1}'를 실행해야 합니다.", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "placeholder": null - }, - { - "id": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "translation": "새 채굴자를 생성할지 기존의 Lotus-Miner에서 이전할지 선택하세요.", - "message": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Migrate from existing Lotus-Miner", - "translation": "기존의 Lotus-Miner에서 이전하기", - "message": "Migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Create a new miner", - "translation": "새로운 채굴자 생성", - "message": "Create a new miner", - "placeholder": null - }, - { - "id": "New Miner initialization complete.", - "translation": "새로운 채굴자 초기화 완료.", - "message": "New Miner initialization complete.", - "placeholder": null - }, - { - "id": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translation": "lotus-miner config.toml을 Curio의 데이터베이스 구성으로 이전 중입니다.", - "message": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "placeholder": null - }, - { - "id": "Error getting API: {Error}", - "translation": "API 가져오기 오류: {Error}", - "message": "Error getting API: {Error}", - "placeholder": null - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using {Curio}.", - "translation": "Curio 팀은 당신이 사용하는 소프트웨어를 개선하고자 합니다. 팀에게 {Curio}를 사용 중이라고 알려주세요.", - "message": "The Curio team wants to improve the software you use. Tell the team you're using {Curio}.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translation": "개별 데이터: 채굴자 ID, Curio 버전, 체인 ({Mainnet} 또는 {Calibration}). 서명됨.", - "message": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). 
Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translation": "집계-익명: 버전, 체인, 및 채굴자 파워 (버킷).", - "message": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on whichever chain.", - "translation": "힌트: 나는 어떤 체인에서든 Curio를 실행 중인 사람입니다.", - "message": "Hint: I am someone running Curio on whichever chain.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "translation": "{Toml}을 Yugabyte 정보로 업데이트하려면 리턴 키를 누르세요. 변경 사항을 적용하기 전에 해당 폴더에 백업 파일이 작성됩니다.", - "message": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "placeholder": null - }, - { - "id": "Error creating backup file: {Error}", - "translation": "백업 파일 생성 오류: {Error}", - "message": "Error creating backup file: {Error}", - "placeholder": null - }, - { - "id": "Error reading config.toml: {Error}", - "translation": "config.toml 읽기 오류: {Error}", - "message": "Error reading config.toml: {Error}", - "placeholder": null - }, - { - "id": "Error writing backup file: {Error}", - "translation": "백업 파일 쓰기 오류: {Error}", - "message": "Error writing backup file: {Error}", - "placeholder": null - }, - { - "id": "Error closing backup file: {Error}", - "translation": "백업 파일 닫기 오류: {Error}", - "message": "Error closing backup file: {Error}", - "placeholder": null - }, - { - "id": "Initializing a new miner actor.", - "translation": "새 채굴자 액터 초기화 중.", - "message": "Initializing a new miner actor.", - "placeholder": null - }, - { - "id": "Enter the info to create a new miner", - "translation": "새 채굴자를 생성하기 위한 정보 입력", - "message": "Enter the info to create a new miner", - "placeholder": null - }, - { - "id": "Owner Address: {String}", - "translation": "소유자 주소: {String}", - "message": 
"Owner Address: {String}", - "placeholder": null - }, - { - "id": "Worker Address: {String}", - "translation": "작업자 주소: {String}", - "message": "Worker Address: {String}", - "placeholder": null - }, - { - "id": "Sender Address: {String}", - "translation": "송신자 주소: {String}", - "message": "Sender Address: {String}", - "placeholder": null - }, - { - "id": "Sector Size: {Ssize}", - "translation": "섹터 크기: {Ssize}", - "message": "Sector Size: {Ssize}", - "placeholder": null - }, - { - "id": "Confidence: {Confidence}", - "translation": "신뢰도: {Confidence}", - "message": "Confidence: {Confidence}", - "placeholder": null - }, - { - "id": "Continue to verify the addresses and create a new miner actor.", - "translation": "주소를 확인하고 새 채굴자 액터를 생성하려면 계속 진행하세요.", - "message": "Continue to verify the addresses and create a new miner actor.", - "placeholder": null - }, - { - "id": "Miner creation error occurred: {Error}", - "translation": "채굴자 생성 오류 발생: {Error}", - "message": "Miner creation error occurred: {Error}", - "placeholder": null - }, - { - "id": "Enter the owner address", - "translation": "소유자 주소 입력", - "message": "Enter the owner address", - "placeholder": null - }, - { - "id": "No address provided", - "translation": "주소가 제공되지 않았습니다", - "message": "No address provided", - "placeholder": null - }, - { - "id": "Failed to parse the address: {Error}", - "translation": "주소 구문 분석 실패: {Error}", - "message": "Failed to parse the address: {Error}", - "placeholder": null - }, - { - "id": "Enter {Stringworker_senderi_1} address", - "translation": "{Stringworker_senderi_1} 주소 입력", - "message": "Enter {Stringworker_senderi_1} address", - "placeholder": null - }, - { - "id": "Enter the sector size", - "translation": "섹터 크기 입력", - "message": "Enter the sector size", - "placeholder": null - }, - { - "id": "Failed to parse sector size: {Error}", - "translation": "섹터 크기 구문 분석 실패: {Error}", - "message": "Failed to parse sector size: {Error}", - "placeholder": null - }, - { - "id": "Enter 
the confidence", - "translation": "신뢰도 입력", - "message": "Enter the confidence", - "placeholder": null - }, - { - "id": "Failed to parse confidence: {Error}", - "translation": "신뢰도 구문 분석 실패: {Error}", - "message": "Failed to parse confidence: {Error}", - "placeholder": null - }, - { - "id": "Failed to create the miner actor: {Error}", - "translation": "채굴자 액터 생성 실패: {Error}", - "message": "Failed to create the miner actor: {Error}", - "placeholder": null - }, - { - "id": "Miner {String} created successfully", - "translation": "{String} 채굴자가 성공적으로 생성되었습니다", - "message": "Miner {String} created successfully", - "placeholder": null - }, - { - "id": "Cannot reach the DB: {Error}", - "translation": "데이터베이스에 연결할 수 없습니다: {Error}", - "message": "Cannot reach the DB: {Error}", - "placeholder": null - }, - { - "id": "Error connecting to full node API: {Error}", - "translation": "풀 노드 API에 연결하는 중 오류 발생: {Error}", - "message": "Error connecting to full node API: {Error}", - "placeholder": null - }, - { - "id": "Pre-initialization steps complete", - "translation": "사전 초기화 단계 완료", - "message": "Pre-initialization steps complete", - "placeholder": null - }, - { - "id": "Failed to random bytes for secret: {Error}", - "translation": "비밀을 위한 랜덤 바이트 생성 실패: {Error}", - "message": "Failed to random bytes for secret: {Error}", - "placeholder": null - }, - { - "id": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "translation": "마이너 생성은 idempotent하지 않으므로 가이드 설정을 다시 실행하지 마십시오. 구성을 완료하려면 'curio config new-cluster {String}'를 실행해야 합니다.", - "message": "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster {String}' to finish the configuration", - "placeholder": null - }, - { - "id": "Failed to verify the auth token from daemon node: {Error}", - "translation": "데몬 노드로부터 인증 토큰을 확인하는 중 오류 발생: {Error}", - "message": "Failed to verify the auth token from daemon node: {Error}", - "placeholder": null - }, - { - "id": "Failed to encode the config: {Error}", - "translation": "구성을 인코딩하는 중 오류 발생: {Error}", - "message": "Failed to encode the config: {Error}", - "placeholder": null - }, - { - "id": "Failed to generate default config: {Error}", - "translation": "기본 구성 생성 실패: {Error}", - "message": "Failed to generate default config: {Error}", - "placeholder": null - }, - { - "id": "Failed to inset 'base' config layer in database: {Error}", - "translation": "데이터베이스에 'base' 구성 레이어 삽입 실패: {Error}", - "message": "Failed to inset 'base' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "Failed to inset '{String}' config layer in database: {Error}", - "translation": "데이터베이스에 '{String}' 구성 레이어 삽입 실패: {Error}", - "message": "Failed to inset '{String}' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "New Curio configuration layer '{String}' created", - "translation": "새로운 Curio 구성 레이어 '{String}'가 생성되었습니다", - "message": "New Curio configuration layer '{String}' created", - "placeholder": null - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "translation": "Curio 팀은 당신이 사용하는 소프트웨어를 개선하고자 합니다. 팀에게 `{Curio}`를 사용 중이라고 알려주세요.", - "message": "The Curio team wants to improve the software you use. 
Tell the team you're using `{Curio}`.", - "placeholder": null - }, - { - "id": "Confidence epochs: {Confidence}", - "translation": "신뢰 에포크: {Confidence}", - "message": "Confidence epochs: {Confidence}", - "placeholder": null - }, - { - "id": "Failed to generate random bytes for secret: {Error}", - "translation": "비밀번호를 위한 랜덤 바이트 생성에 실패했습니다: {Error}", - "message": "Failed to generate random bytes for secret: {Error}", - "placeholder": null - }, - { - "id": "Failed to get API info for FullNode: {Err}", - "translation": "FullNode에 대한 API 정보를 가져오는 데 실패했습니다: {Err}", - "message": "Failed to get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Failed to insert 'base' config layer in database: {Error}", - "translation": "데이터베이스에 'base' 구성 레이어를 삽입하는 데 실패했습니다: {Error}", - "message": "Failed to insert 'base' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "Failed to insert '{String}' config layer in database: {Error}", - "translation": "데이터베이스에 '{String}' 구성 레이어를 삽입하는 데 실패했습니다: {Error}", - "message": "Failed to insert '{String}' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translation": "이 프로세스는 부분적으로 항등원적입니다. 새로운 채굴자 액터가 생성되었고 후속 단계가 실패하는 경우 사용자는 구성을 완료하기 위해 'curio config new-cluster \u003c 채굴자 ID \u003e'를 실행해야 합니다.", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "placeholder": null - }, - { - "id": "Confidence epochs", - "translation": "신뢰 에포크", - "message": "Confidence epochs", - "placeholder": null - }, - { - "id": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translation": "신뢰성 향상을 위한 중복성 사용: 적어도 post 레이어를 사용하여 여러 대의 기계를 시작하십시오: 'curio run --layers=post'", - "message": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "placeholder": null - }, - { - "id": "I want to:", - "translation": "나는 원한다:", - "message": "I want to:", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address", - "translation": "이 마이너 주소를 포함한 구성 'base'가 업데이트되었습니다.", - "message": "Configuration 'base' was updated to include this miner's address", - "placeholder": null - }, - { - "id": "Cannot load base config: {Error}", - "translation": "기본 구성을 불러올 수 없습니다: {Error}", - "message": "Cannot load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config: {Error}", - "translation": "기본 구성을 로드하는 데 실패했습니다: {Error}", - "message": "Failed to load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to regenerate base config: {Error}", - "translation": "기본 구성을 재생성하는 데 실패했습니다: {Error}", - "message": "Failed to regenerate base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config from database: {Error}", - "translation": "데이터베이스에서 기본 구성을 로드하는 데 실패했습니다: {Error}", - "message": "Failed to load base config from database: {Error}", - "placeholder": null - }, - { - "id": "Failed to parse base config: {Error}", - "translation": "기본 구성을 구문 분석하는 데 실패했습니다: {Error}", - "message": "Failed to parse base config: {Error}", - 
"placeholder": null - }, - { - "id": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "translation": "{Rendercurio_run___layersgui}를 사용하여 웹 인터페이스를 시도하고 더 나은 안내된 개선을 진행하세요.", - "message": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translation": "이제 lotus-miner와 lotus-worker를 종료하고 {Rendercurio_run}을 실행하세요.", - "message": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "translation": "기본 설정 'base'가 이 마이너의 주소({MinerAddress}) 및 지갑 설정을 포함하도록 업데이트되었습니다.", - "message": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translation": "'base' 설정이 이 lotus-miner의 config.toml과 유사하게 만들어졌습니다.", - "message": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "placeholder": null - } - ] -} \ No newline at end of file diff --git a/cmd/curio/internal/translations/locales/ko/out.gotext.json b/cmd/curio/internal/translations/locales/ko/out.gotext.json deleted file mode 100644 index 8e0014cd46d..00000000000 --- a/cmd/curio/internal/translations/locales/ko/out.gotext.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "language": "ko", - "messages": [] - } diff --git a/cmd/curio/internal/translations/locales/zh/messages.gotext.json b/cmd/curio/internal/translations/locales/zh/messages.gotext.json deleted file mode 100644 index 1e2608fa8d7..00000000000 --- a/cmd/curio/internal/translations/locales/zh/messages.gotext.json +++ /dev/null @@ -1,1076 +0,0 @@ -{ - "language": "zh", - "messages": [ - 
{ - "id": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "message": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "translation": "此互动工具将引导您完成Curio的迁移。\n随时按Ctrl+C退出。" - }, - { - "id": "This tool confirms each action it does.", - "message": "This tool confirms each action it does.", - "translation": "此工具确认其执行的每个操作。" - }, - { - "id": "Ctrl+C pressed in Terminal", - "message": "Ctrl+C pressed in Terminal", - "translation": "在终端中按下Ctrl+C" - }, - { - "id": "Verifying Sectors exist in Yugabyte.", - "message": "Verifying Sectors exist in Yugabyte.", - "translation": "正在验证Yugabyte中的扇区是否存在。" - }, - { - "id": "Error verifying sectors: {Error}", - "message": "Error verifying sectors: {Error}", - "translation": "验证扇区时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Sectors verified. {I} sectors found.", - "message": "Sectors verified. 
{I} sectors found.", - "translation": "已验证扇区。找到了{I}个扇区。", - "placeholders": [ - { - "id": "I", - "string": "%[1]d", - "type": "[]int", - "underlyingType": "[]int", - "argNum": 1, - "expr": "i" - } - ] - }, - { - "id": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "message": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "translation": "从config.toml中永远不要删除lotus-miner的数据库信息,因为它避免了双PoSt。" - }, - { - "id": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "message": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translation": "输入连接到您的Yugabyte数据库安装的信息(https://download.yugabyte.com/)" - }, - { - "id": "Host: {Hosts_}", - "message": "Host: {Hosts_}", - "translation": "主机:{Hosts_}", - "placeholders": [ - { - "id": "Hosts_", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "strings.Join(harmonycfg.Hosts, \",\")" - } - ] - }, - { - "id": "Port: {Port}", - "message": "Port: {Port}", - "translation": "端口:{Port}", - "placeholders": [ - { - "id": "Port", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Port" - } - ] - }, - { - "id": "Username: {Username}", - "message": "Username: {Username}", - "translation": "用户名:{Username}", - "placeholders": [ - { - "id": "Username", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Username" - } - ] - }, - { - "id": "Password: {Password}", - "message": "Password: {Password}", - "translation": "密码:{Password}", - "placeholders": [ - { - "id": "Password", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Password" - } - ] - }, - { - "id": "Database: {Database}", - "message": "Database: {Database}", - "translation": 
"数据库:{Database}", - "placeholders": [ - { - "id": "Database", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Database" - } - ] - }, - { - "id": "Continue to connect and update schema.", - "message": "Continue to connect and update schema.", - "translation": "继续连接和更新架构。" - }, - { - "id": "Database config error occurred, abandoning migration: {Error}", - "message": "Database config error occurred, abandoning migration: {Error}", - "translation": "发生数据库配置错误,放弃迁移:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Enter the Yugabyte database host(s)", - "message": "Enter the Yugabyte database host(s)", - "translation": "输入Yugabyte数据库主机(S)" - }, - { - "id": "No host provided", - "message": "No host provided", - "translation": "未提供主机" - }, - { - "id": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "message": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translation": "输入Yugabyte数据库 {Stringport_username_password_databasei_1}", - "placeholders": [ - { - "id": "Stringport_username_password_databasei_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"port\", \"username\", \"password\", \"database\"}[i-1]" - } - ] - }, - { - "id": "No value provided", - "message": "No value provided", - "translation": "未提供值" - }, - { - "id": "Error connecting to Yugabyte database: {Error}", - "message": "Error connecting to Yugabyte database: {Error}", - "translation": "连接到Yugabyte数据库时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Connected to Yugabyte. Schema is current.", - "message": "Connected to Yugabyte. 
Schema is current.", - "translation": "已连接到Yugabyte。模式是当前的。" - }, - { - "id": "Error encoding config.toml: {Error}", - "message": "Error encoding config.toml: {Error}", - "translation": "编码config.toml时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error reading filemode of config.toml: {Error}", - "message": "Error reading filemode of config.toml: {Error}", - "translation": "读取config.toml文件模式时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error writing config.toml: {Error}", - "message": "Error writing config.toml: {Error}", - "translation": "写入config.toml时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Restart Lotus Miner.", - "message": "Restart Lotus Miner.", - "translation": "重新启动Lotus Miner。" - }, - { - "id": "Connected to Yugabyte", - "message": "Connected to Yugabyte", - "translation": "已连接到Yugabyte" - }, - { - "id": "Select the location of your lotus-miner config directory?", - "message": "Select the location of your lotus-miner config directory?", - "translation": "选择您的lotus-miner配置目录的位置?" 
- }, - { - "id": "Other", - "message": "Other", - "translation": "其他" - }, - { - "id": "Enter the path to the configuration directory used by lotus-miner", - "message": "Enter the path to the configuration directory used by lotus-miner", - "translation": "输入lotus-miner使用的配置目录的路径" - }, - { - "id": "No path provided, abandoning migration", - "message": "No path provided, abandoning migration", - "translation": "未提供路径,放弃迁移" - }, - { - "id": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "message": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translation": "无法读取提供的目录中的config.toml文件,错误:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Read Miner Config", - "message": "Read Miner Config", - "translation": "读取矿工配置" - }, - { - "id": "Completed Step: {Step}", - "message": "Completed Step: {Step}", - "translation": "完成步骤:{Step}", - "placeholders": [ - { - "id": "Step", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "step" - } - ] - }, - { - "id": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translation": "这个交互式工具可以在5分钟内将lotus-miner迁移到Curio。", - "message": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "placeholder": null - }, - { - "id": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "translation": "每一步都需要您的确认,并且可以撤销。随时按Ctrl+C退出。", - "message": "Each step needs your confirmation and can be reversed. 
Press Ctrl+C to exit at any time.", - "placeholder": null - }, - { - "id": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translation": "使用箭头键进行导航:↓ ↑ → ←", - "message": "Use the arrow keys to navigate: ↓ ↑ → ←", - "placeholder": null - }, - { - "id": "Lotus-Miner to Curio Migration.", - "translation": "Lotus-Miner到Curio迁移。", - "message": "Lotus-Miner to Curio Migration.", - "placeholder": null - }, - { - "id": "Try the web interface with for further guided improvements.", - "translation": "尝试使用网页界面进行进一步的指导改进。", - "message": "Try the web interface with for further guided improvements.", - "placeholder": null - }, - { - "id": "You can now migrate your market node ({Boost}), if applicable.", - "translation": "如果适用,您现在可以迁移您的市场节点({Boost})。", - "message": "You can now migrate your market node ({Boost}), if applicable.", - "placeholder": null - }, - { - "id": "Migrating config.toml to database.", - "translation": "正在将config.toml迁移到数据库。", - "message": "Migrating config.toml to database.", - "placeholder": null - }, - { - "id": "Error reading from database: {Error}. Aborting Migration.", - "translation": "读取数据库时出错:{Error}。正在中止迁移。", - "message": "Error reading from database: {Error}. Aborting Migration.", - "placeholder": null - }, - { - "id": "cannot read API: {Error}. Aborting Migration", - "translation": "无法读取API:{Error}。正在中止迁移", - "message": "cannot read API: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Error saving config to layer: {Error}. Aborting Migration", - "translation": "保存配置到层时出错:{Error}。正在中止迁移", - "message": "Error saving config to layer: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Protocol Labs wants to improve the software you use. Tell the team you're using Curio.", - "translation": "Protocol Labs希望改进您使用的软件。告诉团队您正在使用Curio。", - "message": "Protocol Labs wants to improve the software you use. 
Tell the team you're using Curio.", - "placeholder": null - }, - { - "id": "Select what you want to share with the Curio team.", - "translation": "选择您想与Curio团队分享的内容。", - "message": "Select what you want to share with the Curio team.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "translation": "个人数据:矿工ID、Curio版本、网络({Mainnet}或{Testnet})。已签名。", - "message": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "translation": "聚合-匿名:版本、网络和矿工功率(分桶)。", - "message": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on net.", - "translation": "提示:我是在网络上运行Curio的人。", - "message": "Hint: I am someone running Curio on net.", - "placeholder": null - }, - { - "id": "Nothing.", - "translation": "没有。", - "message": "Nothing.", - "placeholder": null - }, - { - "id": "Aborting remaining steps.", - "translation": "中止剩余步骤。", - "message": "Aborting remaining steps.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error}", - "translation": "连接到莲花节点时出错:{Error}", - "message": "Error connecting to lotus node: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner power: {Error}", - "translation": "获取矿工功率时出错:{Error}", - "message": "Error getting miner power: {Error}", - "placeholder": null - }, - { - "id": "Error marshalling message: {Error}", - "translation": "整理消息时出错:{Error}", - "message": "Error marshalling message: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner info: {Error}", - "translation": "获取矿工信息时出错:{Error}", - "message": "Error getting miner info: {Error}", - "placeholder": null - }, - { - "id": "Error signing message: {Error}", - "translation": "签署消息时出错:{Error}", - "message": "Error signing message: {Error}", - 
"placeholder": null - }, - { - "id": "Error sending message: {Error}", - "translation": "发送消息时出错:{Error}", - "message": "Error sending message: {Error}", - "placeholder": null - }, - { - "id": "Error sending message: Status {Status}, Message:", - "translation": "发送消息时出错:状态{Status},消息:", - "message": "Error sending message: Status {Status}, Message:", - "placeholder": null - }, - { - "id": "Message sent.", - "translation": "消息已发送。", - "message": "Message sent.", - "placeholder": null - }, - { - "id": "Documentation:", - "translation": "文档:", - "message": "Documentation:", - "placeholder": null - }, - { - "id": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "translation": "'{Base}'层存储通用配置。所有Curio实例都可以在其{__layers}参数中包含它。", - "message": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "placeholder": null - }, - { - "id": "You can add other layers for per-machine configuration changes.", - "translation": "您可以添加其他层进行每台机器的配置更改。", - "message": "You can add other layers for per-machine configuration changes.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "translation": "加入Filecoin {Slack}中的{Fil_curio_help}寻求帮助。", - "message": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "translation": "加入Filecoin {Slack}中的{Fil_curio_dev}来跟踪开发和反馈!", - "message": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "placeholder": null - }, - { - "id": "Want PoST redundancy? Run many Curio instances with the '{Post}' layer.", - "translation": "需要PoST冗余?使用'{Post}'层运行多个Curio实例。", - "message": "Want PoST redundancy? 
Run many Curio instances with the '{Post}' layer.", - "placeholder": null - }, - { - "id": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "translation": "将您的浏览器指向您的网络GUI,以使用{Boost}和高级功能完成设置。", - "message": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "placeholder": null - }, - { - "id": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. The cluster will serve all Miner IDs.", - "translation": "对于具有多个矿工ID的SP,针对所有lotus-miner运行1次迁移到同一个数据库。集群将服务所有矿工ID。", - "message": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. The cluster will serve all Miner IDs.", - "placeholder": null - }, - { - "id": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "现在数据库凭证在{Toml}中,请启动{Lotus_miner}。", - "message": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translation": "等待{Lotus_miner}将扇区写入Yugabyte。", - "message": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "placeholder": null - }, - { - "id": "The sectors are in the database. The database is ready for {Curio}.", - "translation": "扇区在数据库中。数据库已准备好用于{Curio}。", - "message": "The sectors are in the database. 
The database is ready for {Curio}.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and move the systems to {Curio}.", - "translation": "现在关闭lotus-miner并将系统移至{Curio}。", - "message": "Now shut down lotus-miner and move the systems to {Curio}.", - "placeholder": null - }, - { - "id": "Press return to continue", - "translation": "按回车继续", - "message": "Press return to continue", - "placeholder": null - }, - { - "id": "Aborting migration.", - "translation": "中止迁移。", - "message": "Aborting migration.", - "placeholder": null - }, - { - "id": "Sectors verified. {I} sector locations found.", - "translation": "扇区已验证。发现了{I}个扇区位置。", - "message": "Sectors verified. {I} sector locations found.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. Backup the file now.", - "translation": "按回车更新{Toml}以获取Yugabyte信息。现在备份文件。", - "message": "Press return to update {Toml} with Yugabyte info. Backup the file now.", - "placeholder": null - }, - { - "id": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translation": "开始之前,请确保您的密封管道已排空并关闭lotus-miner。", - "message": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "placeholder": null - }, - { - "id": "Enter the path to the configuration directory used by {Lotus_miner}", - "translation": "输入{Lotus_miner}使用的配置目录的路径", - "message": "Enter the path to the configuration directory used by {Lotus_miner}", - "placeholder": null - }, - { - "id": "Step Complete: {Step}", - "translation": "步骤完成:{Step}", - "message": "Step Complete: {Step}", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - "translation": "配置'base'已更新,包含了这个矿工的地址和其钱包设置。", - "message": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - "placeholder": null - }, - { - "id": "Compare the configurations {Base} to {MinerAddresses0}. 
Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translation": "比较配置{Base}和{MinerAddresses0}。矿工ID之间除了钱包地址的变化应该是需要的运行者的一个新的、最小的层。", - "message": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "translation": "配置'base'已创建,包括了这个矿工的地址和其钱包设置。", - "message": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "placeholder": null - }, - { - "id": "Layer {LayerName} created.", - "translation": "层{LayerName}已创建。", - "message": "Layer {LayerName} created.", - "placeholder": null - }, - { - "id": "To work with the config: \\n", - "translation": "要使用配置:\\n", - "message": "To work with the config: \\n", - "placeholder": null - }, - { - "id": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translation": "运行Curio:使用机器或cgroup隔离,使用命令(附带示例层选择):", - "message": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "placeholder": null - }, - { - "id": "Try the web interface with {__layersgui} for further guided improvements.", - "translation": "尝试使用{__layersgui}的Web界面进行进一步引导式改进。", - "message": "Try the web interface with {__layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error} {Error_1}", - "translation": "连接到lotus节点时出错:{Error} {Error_1}", - "message": "Error connecting to lotus node: {Error} {Error_1}", - "placeholder": null - }, - { - "id": "could not get API info for FullNode: {Err}", - "translation": "无法获取FullNode的API信息:{Err}", - "message": "could not get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Error getting token: 
{Error}", - "translation": "获取令牌时出错:{Error}", - "message": "Error getting token: {Error}", - "placeholder": null - }, - { - "id": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "translation": "Filecoin {Slack} 频道:{Fil_curio_help} 和 {Fil_curio_dev}", - "message": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "placeholder": null - }, - { - "id": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "translation": "使用'{Post}'层启动多个Curio实例以实现冗余。", - "message": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "placeholder": null - }, - { - "id": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translation": "一个数据库可以服务多个矿工ID:为每个lotus-miner运行迁移。", - "message": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "placeholder": null - }, - { - "id": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "请立即启动(或重新启动){Lotus_miner},因为数据库凭据已在{Toml}中。", - "message": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Error interpreting miner ID: {Error}: ID: {String}", - "translation": "解释矿工ID时出错:{Error}:ID:{String}", - "message": "Error interpreting miner ID: {Error}: ID: {String}", - "placeholder": null - }, - { - "id": "Enabling Sector Indexing in the database.", - "translation": "在数据库中启用扇区索引。", - "message": "Enabling Sector Indexing in the database.", - "placeholder": null - }, - { - "id": "Error expanding path: {Error}", - "translation": "扩展路径时出错:{Error}", - "message": "Error expanding path: {Error}", - "placeholder": null - }, - { - "id": "Could not create repo from directory: {Error}. Aborting migration", - "translation": "无法从目录创建repo:{Error}。 中止迁移", - "message": "Could not create repo from directory: {Error}. 
Aborting migration", - "placeholder": null - }, - { - "id": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "translation": "无法锁定矿工repo。 您的矿工必须停止:{Error}\n 中止迁移", - "message": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "placeholder": null - }, - { - "id": "To work with the config:", - "translation": "要使用配置:", - "message": "To work with the config:", - "placeholder": null - }, - { - "id": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translation": "此交互式工具将创建一个新的矿工角色,并为其创建基本配置层。", - "message": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "translation": "此过程在某种程度上是幂等的。一旦创建了新的矿工角色,并且后续步骤失败,用户需要运行'curio config new-cluster {Arg_1}'来完成配置。", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "placeholder": null - }, - { - "id": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "translation": "选择您是否要创建新矿工或从现有的 Lotus-Miner 迁移", - "message": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Migrate from existing Lotus-Miner", - "translation": "从现有的 Lotus-Miner 迁移", - "message": "Migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Create a new miner", - "translation": "创建一个新的矿工", - "message": "Create a new miner", - "placeholder": null - }, - { - "id": "New Miner initialization complete.", - "translation": "新矿工初始化完成。", - "message": "New Miner initialization complete.", - "placeholder": null - }, - { - "id": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translation": "将 lotus-miner config.toml 迁移到 Curio 的数据库配置中。", - "message": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "placeholder": null - }, - { - "id": "Error getting API: {Error}", - "translation": "获取 API 时出错:{Error}", - "message": "Error getting API: {Error}", - "placeholder": null - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "translation": "Curio 团队希望改进您使用的软件。告诉团队您正在使用 `{Curio}`。", - "message": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translation": "个人数据:矿工 ID,Curio 版本,链({Mainnet} 或 {Calibration})。签名。", - "message": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). 
Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translation": "聚合-匿名:版本,链和矿工算力(分桶)。", - "message": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on whichever chain.", - "translation": "提示:我是在任何链上运行 Curio 的人。", - "message": "Hint: I am someone running Curio on whichever chain.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "translation": "按回车键更新 {Toml} 以包含 Yugabyte 信息。在进行更改之前,将在该文件夹中写入备份文件。", - "message": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "placeholder": null - }, - { - "id": "Error creating backup file: {Error}", - "translation": "创建备份文件时出错:{Error}", - "message": "Error creating backup file: {Error}", - "placeholder": null - }, - { - "id": "Error reading config.toml: {Error}", - "translation": "读取 config.toml 时出错:{Error}", - "message": "Error reading config.toml: {Error}", - "placeholder": null - }, - { - "id": "Error writing backup file: {Error}", - "translation": "写入备份文件时出错:{Error}", - "message": "Error writing backup file: {Error}", - "placeholder": null - }, - { - "id": "Error closing backup file: {Error}", - "translation": "关闭备份文件时出错:{Error}", - "message": "Error closing backup file: {Error}", - "placeholder": null - }, - { - "id": "Initializing a new miner actor.", - "translation": "初始化新的矿工角色。", - "message": "Initializing a new miner actor.", - "placeholder": null - }, - { - "id": "Enter the info to create a new miner", - "translation": "输入创建新矿工所需的信息", - "message": "Enter the info to create a new miner", - "placeholder": null - }, - { - "id": "Owner Address: {String}", - "translation": "所有者地址:{String}", - "message": "Owner Address: {String}", - "placeholder": null - }, - { - "id": "Worker 
Address: {String}", - "translation": "工作地址:{String}", - "message": "Worker Address: {String}", - "placeholder": null - }, - { - "id": "Sender Address: {String}", - "translation": "发送者地址:{String}", - "message": "Sender Address: {String}", - "placeholder": null - }, - { - "id": "Sector Size: {Ssize}", - "translation": "扇区大小: {Ssize}", - "message": "Sector Size: {Ssize}", - "placeholder": null - }, - { - "id": "Confidence epochs: {Confidence}", - "translation": "置信度时期: {Confidence}", - "message": "Confidence epochs: {Confidence}", - "placeholder": null - }, - { - "id": "Continue to verify the addresses and create a new miner actor.", - "translation": "继续验证地址并创建新的矿工角色。", - "message": "Continue to verify the addresses and create a new miner actor.", - "placeholder": null - }, - { - "id": "Miner creation error occurred: {Error}", - "translation": "矿工创建错误发生: {Error}", - "message": "Miner creation error occurred: {Error}", - "placeholder": null - }, - { - "id": "Enter the owner address", - "translation": "输入所有者地址", - "message": "Enter the owner address", - "placeholder": null - }, - { - "id": "No address provided", - "translation": "未提供地址", - "message": "No address provided", - "placeholder": null - }, - { - "id": "Failed to parse the address: {Error}", - "translation": "解析地址失败: {Error}", - "message": "Failed to parse the address: {Error}", - "placeholder": null - }, - { - "id": "Enter {Stringworker_senderi_1} address", - "translation": "输入 {Stringworker_senderi_1} 地址", - "message": "Enter {Stringworker_senderi_1} address", - "placeholder": null - }, - { - "id": "Enter the sector size", - "translation": "输入扇区大小", - "message": "Enter the sector size", - "placeholder": null - }, - { - "id": "Failed to parse sector size: {Error}", - "translation": "解析扇区大小失败: {Error}", - "message": "Failed to parse sector size: {Error}", - "placeholder": null - }, - { - "id": "Enter the confidence", - "translation": "输入置信度", - "message": "Enter the confidence", - "placeholder": null - }, - { - 
"id": "Failed to parse confidence: {Error}", - "translation": "解析置信度失败: {Error}", - "message": "Failed to parse confidence: {Error}", - "placeholder": null - }, - { - "id": "Failed to create the miner actor: {Error}", - "translation": "创建矿工角色失败: {Error}", - "message": "Failed to create the miner actor: {Error}", - "placeholder": null - }, - { - "id": "Miner {String} created successfully", - "translation": "矿工 {String} 创建成功", - "message": "Miner {String} created successfully", - "placeholder": null - }, - { - "id": "Cannot reach the DB: {Error}", - "translation": "无法访问数据库: {Error}", - "message": "Cannot reach the DB: {Error}", - "placeholder": null - }, - { - "id": "Error connecting to full node API: {Error}", - "translation": "连接到完整节点 API 时发生错误: {Error}", - "message": "Error connecting to full node API: {Error}", - "placeholder": null - }, - { - "id": "Pre-initialization steps complete", - "translation": "预初始化步骤完成", - "message": "Pre-initialization steps complete", - "placeholder": null - }, - { - "id": "Failed to generate random bytes for secret: {Error}", - "translation": "生成密码的随机字节失败: {Error}", - "message": "Failed to generate random bytes for secret: {Error}", - "placeholder": null - }, - { - "id": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "translation": "请不要再次运行引导设置,因为矿工创建不是幂等的。 您需要运行 'curio config new-cluster {String}' 来完成配置。", - "message": "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster {String}' to finish the configuration", - "placeholder": null - }, - { - "id": "Failed to get API info for FullNode: {Err}", - "translation": "无法获取 FullNode 的 API 信息: {Err}", - "message": "Failed to get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Failed to verify the auth token from daemon node: {Error}", - "translation": "无法验证来自守护进程节点的授权令牌: {Error}", - "message": "Failed to verify the auth token from daemon node: {Error}", - "placeholder": null - }, - { - "id": "Failed to encode the config: {Error}", - "translation": "无法编码配置: {Error}", - "message": "Failed to encode the config: {Error}", - "placeholder": null - }, - { - "id": "Failed to generate default config: {Error}", - "translation": "无法生成默认配置: {Error}", - "message": "Failed to generate default config: {Error}", - "placeholder": null - }, - { - "id": "Failed to insert 'base' config layer in database: {Error}", - "translation": "无法将 'base' 配置层插入数据库: {Error}", - "message": "Failed to insert 'base' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "Failed to insert '{String}' config layer in database: {Error}", - "translation": "无法将 '{String}' 配置层插入数据库: {Error}", - "message": "Failed to insert '{String}' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "New Curio configuration layer '{String}' created", - "translation": "新的 Curio 配置层 '{String}' 已创建", - "message": "New Curio configuration layer '{String}' created", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translation": "该过程部分幂等。一旦创建了新的矿工角色,并且随后的步骤失败,用户需要运行 'curio config new-cluster \u003c 矿工 ID \u003e' 来完成配置。", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "placeholder": null - }, - { - "id": "Confidence epochs", - "translation": "置信度时期", - "message": "Confidence epochs", - "placeholder": null - }, - { - "id": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translation": "通过冗余增加可靠性:使用至少后层启动多台机器:'curio run --layers=post'", - "message": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "placeholder": null - }, - { - "id": "I want to:", - "translation": "我想要:", - "message": "I want to:", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address", - "translation": "配置 'base' 已更新以包含此矿工的地址", - "message": "Configuration 'base' was updated to include this miner's address", - "placeholder": null - }, - { - "id": "Cannot load base config: {Error}", - "translation": "无法加载基本配置: {Error}", - "message": "Cannot load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config: {Error}", - "translation": "加载基本配置失败: {Error}", - "message": "Failed to load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to regenerate base config: {Error}", - "translation": "重新生成基本配置失败: {Error}", - "message": "Failed to regenerate base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config from database: {Error}", - "translation": "从数据库加载基本配置失败:{Error}", - "message": "Failed to load base config from database: {Error}", - "placeholder": null - }, - { - "id": "Failed to parse base config: {Error}", - "translation": "解析基本配置失败:{Error}", - "message": "Failed to parse base config: {Error}", - "placeholder": null - }, - { - "id": "Try the web interface with {Rendercurio_run___layersgui} for further guided 
improvements.", - "translation": "尝试使用{Rendercurio_run___layersgui}的网络界面进行更进一步的指导性改进。", - "message": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translation": "现在关闭lotus-miner和lotus-worker,改为使用{Rendercurio_run}运行。", - "message": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "translation": "'base'配置已更新,包括该矿工的地址({MinerAddress})及其钱包设置。", - "message": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translation": "'base'配置已创建,以类似于这个lotus-miner的config.toml。", - "message": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "placeholder": null - } - ] -} \ No newline at end of file diff --git a/cmd/curio/internal/translations/locales/zh/out.gotext.json b/cmd/curio/internal/translations/locales/zh/out.gotext.json deleted file mode 100644 index bb9d25e4cad..00000000000 --- a/cmd/curio/internal/translations/locales/zh/out.gotext.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "language": "zh", - "messages": [] - } diff --git a/cmd/curio/internal/translations/translations.go b/cmd/curio/internal/translations/translations.go deleted file mode 100644 index 361e8e89401..00000000000 --- a/cmd/curio/internal/translations/translations.go +++ /dev/null @@ -1,27 +0,0 @@ -// Usage: -// To UPDATE translations: -// -// 1. add/change strings in guidedsetup folder that use d.T() or d.say(). -// -// 2. run `go generate` in the cmd/curio/internal/translations/ folder. -// -// 3. 
ChatGPT 3.5 can translate the ./locales/??/out.gotext.json files' -// which ONLY include the un-translated messages. -// APPEND to the messages.gotext.json files's array. -// -// ChatGPT fuss: -// - on a good day, you may need to hit "continue generating". -// - > 60? you'll need to give it sections of the file. -// -// 4. Re-import with `go generate` again. -// -// To ADD a language: -// 1. Add it to the list in updateLang.sh -// 2. Run `go generate` in the cmd/curio/internal/translations/ folder. -// 3. Follow the "Update translations" steps here. -// 4. Code will auto-detect the new language and use it. -// -// FUTURE Reliability: OpenAPI automation. -package translations - -//go:generate ./updateLang.sh diff --git a/cmd/curio/internal/translations/updateLang.sh b/cmd/curio/internal/translations/updateLang.sh deleted file mode 100755 index 984f63fd5d8..00000000000 --- a/cmd/curio/internal/translations/updateLang.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -#OP: Only run if some file in ../guidedsetup* is newer than catalog.go -# Change this condition if using translations more widely. 
-if [ "$(find ../../guidedsetup/* -newer catalog.go)" ] || [ "$(find locales/* -newer catalog.go)" ]; then - gotext -srclang=en update -out=catalog.go -lang=en,zh,ko github.com/filecoin-project/lotus/cmd/curio/guidedsetup - go run knowns/main.go locales/zh locales/ko -fi diff --git a/cmd/curio/log.go b/cmd/curio/log.go deleted file mode 100644 index 0af41a679dd..00000000000 --- a/cmd/curio/log.go +++ /dev/null @@ -1,105 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/rpc" -) - -var logCmd = &cli.Command{ - Name: "log", - Usage: "Manage logging", - Subcommands: []*cli.Command{ - LogList, - LogSetLevel, - }, -} - -var LogList = &cli.Command{ - Name: "list", - Usage: "List log systems", - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - systems, err := minerApi.LogList(ctx) - if err != nil { - return err - } - - for _, system := range systems { - fmt.Println(system) - } - - return nil - }, -} - -var LogSetLevel = &cli.Command{ - Name: "set-level", - Usage: "Set log level", - ArgsUsage: "[level]", - Description: `Set the log level for logging systems: - - The system flag can be specified multiple times. - - eg) log set-level --system chain --system chainxchg debug - - Available Levels: - debug - info - warn - error - - Environment Variables: - GOLOG_LOG_LEVEL - Default log level for all log systems - GOLOG_LOG_FMT - Change output log format (json, nocolor) - GOLOG_FILE - Write logs to file - GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. 
file+stderr -`, - Flags: []cli.Flag{ - &cli.StringSliceFlag{ - Name: "system", - Usage: "limit to log system", - Value: &cli.StringSlice{}, - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if !cctx.Args().Present() { - return fmt.Errorf("level is required") - } - - systems := cctx.StringSlice("system") - if len(systems) == 0 { - var err error - systems, err = minerApi.LogList(ctx) - if err != nil { - return err - } - } - - for _, system := range systems { - if err := minerApi.LogSetLevel(ctx, system, cctx.Args().First()); err != nil { - return xerrors.Errorf("setting log level on %s: %v", system, err) - } - } - - return nil - }, -} diff --git a/cmd/curio/main.go b/cmd/curio/main.go deleted file mode 100644 index 9a092dad0f5..00000000000 --- a/cmd/curio/main.go +++ /dev/null @@ -1,189 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - "runtime/pprof" - "syscall" - - "github.com/docker/go-units" - "github.com/fatih/color" - logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-paramfetch" - - "github.com/filecoin-project/lotus/build" - lcli "github.com/filecoin-project/lotus/cli" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/guidedsetup" - "github.com/filecoin-project/lotus/lib/lotuslog" - "github.com/filecoin-project/lotus/lib/tracing" - "github.com/filecoin-project/lotus/node/repo" -) - -var log = logging.Logger("main") - -const ( - FlagMinerRepo = "miner-repo" -) - -func setupCloseHandler() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - fmt.Println("\r- Ctrl+C pressed in Terminal") - _ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 
1) - panic(1) - }() -} - -func main() { - - lotuslog.SetupLogLevels() - - local := []*cli.Command{ - cliCmd, - runCmd, - stopCmd, - configCmd, - testCmd, - webCmd, - guidedsetup.GuidedsetupCmd, - sealCmd, - marketCmd, - fetchParamCmd, - } - - jaeger := tracing.SetupJaegerTracing("curio") - defer func() { - if jaeger != nil { - _ = jaeger.ForceFlush(context.Background()) - } - }() - - for _, cmd := range local { - cmd := cmd - originBefore := cmd.Before - cmd.Before = func(cctx *cli.Context) error { - if jaeger != nil { - _ = jaeger.Shutdown(cctx.Context) - } - jaeger = tracing.SetupJaegerTracing("curio/" + cmd.Name) - - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - - if originBefore != nil { - return originBefore(cctx) - } - - return nil - } - } - - app := &cli.App{ - Name: "curio", - Usage: "Filecoin decentralized storage network provider", - Version: build.UserVersion(), - EnableBashCompletion: true, - Before: func(c *cli.Context) error { - setupCloseHandler() - return nil - }, - Flags: []cli.Flag{ - &cli.BoolFlag{ - // examined in the Before above - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - &cli.StringFlag{ - Name: "panic-reports", - EnvVars: []string{"CURIO_PANIC_REPORT_PATH"}, - Hidden: true, - Value: "~/.curio", // should follow --repo default - }, - &cli.StringFlag{ - Name: "db-host", - EnvVars: []string{"CURIO_DB_HOST", "CURIO_HARMONYDB_HOSTS"}, - Usage: "Command separated list of hostnames for yugabyte cluster", - Value: "127.0.0.1", - }, - &cli.StringFlag{ - Name: "db-name", - EnvVars: []string{"CURIO_DB_NAME", "CURIO_HARMONYDB_NAME"}, - Value: "yugabyte", - }, - &cli.StringFlag{ - Name: "db-user", - EnvVars: []string{"CURIO_DB_USER", "CURIO_HARMONYDB_USERNAME"}, - Value: "yugabyte", - }, - &cli.StringFlag{ - Name: "db-password", - EnvVars: []string{"CURIO_DB_PASSWORD", "CURIO_HARMONYDB_PASSWORD"}, - Value: "yugabyte", - }, - &cli.StringFlag{ - Name: "db-port", - 
EnvVars: []string{"CURIO_DB_PORT", "CURIO_HARMONYDB_PORT"}, - Value: "5433", - }, - &cli.StringFlag{ - Name: deps.FlagRepoPath, - EnvVars: []string{"CURIO_REPO_PATH"}, - Value: "~/.curio", - }, - cliutil.FlagVeryVerbose, - }, - Commands: local, - After: func(c *cli.Context) error { - if r := recover(); r != nil { - p, err := homedir.Expand(c.String(FlagMinerRepo)) - if err != nil { - log.Errorw("could not expand repo path for panic report", "error", err) - panic(r) - } - - // Generate report in CURIO_PATH and re-raise panic - build.GeneratePanicReport(c.String("panic-reports"), p, c.App.Name) - panic(r) - } - return nil - }, - } - app.Setup() - app.Metadata["repoType"] = repo.Curio - lcli.RunApp(app) -} - -var fetchParamCmd = &cli.Command{ - Name: "fetch-params", - Usage: "Fetch proving parameters", - ArgsUsage: "[sectorSize]", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return xerrors.Errorf("incorrect number of arguments") - } - sectorSizeInt, err := units.RAMInBytes(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("error parsing sector size (specify as \"32GiB\", for instance): %w", err) - } - sectorSize := uint64(sectorSizeInt) - - err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) - if err != nil { - return xerrors.Errorf("fetching proof parameters: %w", err) - } - - return nil - }, -} diff --git a/cmd/curio/market.go b/cmd/curio/market.go deleted file mode 100644 index cc562db932c..00000000000 --- a/cmd/curio/market.go +++ /dev/null @@ -1,70 +0,0 @@ -package main - -import ( - "fmt" - "sort" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/market/lmrpc" -) - -var marketCmd = &cli.Command{ - Name: "market", - Subcommands: []*cli.Command{ - marketRPCInfoCmd, - }, -} - -var marketRPCInfoCmd = &cli.Command{ - Flags: []cli.Flag{ - &cli.StringSliceFlag{ - Name: "layers", 
- Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - cfg, err := deps.GetConfig(cctx, db) - if err != nil { - return xerrors.Errorf("get config: %w", err) - } - - ts, err := lmrpc.MakeTokens(cfg) - if err != nil { - return xerrors.Errorf("make tokens: %w", err) - } - - var addrTokens []struct { - Address string - Token string - } - - for address, s := range ts { - addrTokens = append(addrTokens, struct { - Address string - Token string - }{ - Address: address.String(), - Token: s, - }) - } - - sort.Slice(addrTokens, func(i, j int) bool { - return addrTokens[i].Address < addrTokens[j].Address - }) - - for _, at := range addrTokens { - fmt.Printf("[lotus-miner/boost compatible] %s %s\n", at.Address, at.Token) - } - - return nil - }, - Name: "rpc-info", -} diff --git a/cmd/curio/migrate.go b/cmd/curio/migrate.go deleted file mode 100644 index 06ab7d0f9a3..00000000000 --- a/cmd/curio/migrate.go +++ /dev/null @@ -1 +0,0 @@ -package main diff --git a/cmd/curio/pipeline.go b/cmd/curio/pipeline.go deleted file mode 100644 index 1c3f5d94a28..00000000000 --- a/cmd/curio/pipeline.go +++ /dev/null @@ -1,135 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -var sealCmd = &cli.Command{ - Name: "seal", - Usage: "Manage the sealing pipeline", - Subcommands: []*cli.Command{ - sealStartCmd, - }, -} - -var sealStartCmd = &cli.Command{ - Name: "start", - Usage: "Start new 
sealing operations manually", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "actor", - Usage: "Specify actor address to start sealing sectors for", - Required: true, - }, - &cli.BoolFlag{ - Name: "now", - Usage: "Start sealing sectors for all actors now (not on schedule)", - }, - &cli.BoolFlag{ - Name: "cc", - Usage: "Start sealing new CC sectors", - }, - &cli.IntFlag{ - Name: "count", - Usage: "Number of sectors to start", - Value: 1, - }, - &cli.BoolFlag{ - Name: "synthetic", - Usage: "Use synthetic PoRep", - Value: false, // todo implement synthetic - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Bool("now") { - return xerrors.Errorf("schedule not implemented, use --now") - } - if !cctx.IsSet("actor") { - return cli.ShowCommandHelp(cctx, "start") - } - if !cctx.Bool("cc") { - return xerrors.Errorf("only CC sectors supported for now") - } - - act, err := address.NewFromString(cctx.String("actor")) - if err != nil { - return xerrors.Errorf("parsing --actor: %w", err) - } - - ctx := lcli.ReqContext(cctx) - dep, err := deps.GetDepsCLI(ctx, cctx) - if err != nil { - return err - } - - /* - create table sectors_sdr_pipeline ( - sp_id bigint not null, - sector_number bigint not null, - - -- at request time - create_time timestamp not null, - reg_seal_proof int not null, - comm_d_cid text not null, - - [... 
other not relevant fields] - */ - - mid, err := address.IDFromAddress(act) - if err != nil { - return xerrors.Errorf("getting miner id: %w", err) - } - - mi, err := dep.Full.StateMinerInfo(ctx, act, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - nv, err := dep.Full.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting network version: %w", err) - } - - wpt := mi.WindowPoStProofType - spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, wpt, cctx.Bool("synthetic")) - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - - num, err := seal.AllocateSectorNumbers(ctx, dep.Full, dep.DB, act, cctx.Int("count"), func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - for _, n := range numbers { - _, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt) - if err != nil { - return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err) - } - } - return true, nil - }) - if err != nil { - return xerrors.Errorf("allocating sector numbers: %w", err) - } - - for _, number := range num { - fmt.Println(number) - } - - return nil - }, -} diff --git a/cmd/curio/proving.go b/cmd/curio/proving.go deleted file mode 100644 index 3b5a3e0e47b..00000000000 --- a/cmd/curio/proving.go +++ /dev/null @@ -1,204 +0,0 @@ -package main - -import ( - "context" - "database/sql" - "encoding/json" - "errors" - "fmt" - "os" - "time" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/dline" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - curio "github.com/filecoin-project/lotus/curiosrc" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -var testCmd = &cli.Command{ - Name: "test", - Usage: "Utility functions for testing", - Subcommands: []*cli.Command{ - 
//provingInfoCmd, - wdPostCmd, - }, - Before: func(cctx *cli.Context) error { - return nil - }, -} - -var wdPostCmd = &cli.Command{ - Name: "window-post", - Aliases: []string{"wd", "windowpost", "wdpost"}, - Usage: "Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain.", - Subcommands: []*cli.Command{ - wdPostHereCmd, - wdPostTaskCmd, - }, -} - -// wdPostTaskCmd writes to harmony_task and wdpost_partition_tasks, then waits for the result. -// It is intended to be used to test the windowpost scheduler. -// The end of the compute task puts the task_id onto wdpost_proofs, which is read by the submit task. -// The submit task will not send test tasks to the chain, and instead will write the result to harmony_test. -// The result is read by this command, and printed to stdout. -var wdPostTaskCmd = &cli.Command{ - Name: "task", - Aliases: []string{"scheduled", "schedule", "async", "asynchronous"}, - Usage: "Test the windowpost scheduler by running it on the next available curio. ", - Flags: []cli.Flag{ - &cli.Uint64Flag{ - Name: "deadline", - Usage: "deadline to compute WindowPoSt for ", - Value: 0, - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - ctx := context.Background() - - deps, err := deps.GetDeps(ctx, cctx) - if err != nil { - return xerrors.Errorf("get config: %w", err) - } - - ts, err := deps.Full.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("cannot get chainhead %w", err) - } - ht := ts.Height() - - // It's not important to be super-accurate as it's only for basic testing. 
- addr, err := address.NewFromString(deps.Cfg.Addresses[0].MinerAddresses[0]) - if err != nil { - return xerrors.Errorf("cannot get miner address %w", err) - } - maddr, err := address.IDFromAddress(addr) - if err != nil { - return xerrors.Errorf("cannot get miner id %w", err) - } - var taskId int64 - - _, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&taskId) - if err != nil { - log.Error("inserting harmony_task: ", err) - return false, xerrors.Errorf("inserting harmony_task: %w", err) - } - _, err = tx.Exec(`INSERT INTO wdpost_partition_tasks - (task_id, sp_id, proving_period_start, deadline_index, partition_index) VALUES ($1, $2, $3, $4, $5)`, - taskId, maddr, ht, cctx.Uint64("deadline"), 0) - if err != nil { - log.Error("inserting wdpost_partition_tasks: ", err) - return false, xerrors.Errorf("inserting wdpost_partition_tasks: %w", err) - } - _, err = tx.Exec("INSERT INTO harmony_test (task_id) VALUES ($1)", taskId) - if err != nil { - return false, xerrors.Errorf("inserting into harmony_tests: %w", err) - } - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return xerrors.Errorf("writing SQL transaction: %w", err) - } - fmt.Printf("Inserted task %v. Waiting for success ", taskId) - var result sql.NullString - for { - time.Sleep(time.Second) - err = deps.DB.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, taskId).Scan(&result) - if err != nil { - return xerrors.Errorf("reading result from harmony_test: %w", err) - } - if result.Valid { - break - } - fmt.Print(".") - } - fmt.Println() - log.Infof("Result: %s", result.String) - return nil - }, -} - -// This command is intended to be used to verify PoSt compute performance. -// It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain. 
-// The entire processing happens in this process while you wait. It does not use the scheduler. -var wdPostHereCmd = &cli.Command{ - Name: "here", - Aliases: []string{"cli"}, - Usage: "Compute WindowPoSt for performance and configuration testing.", - Description: `Note: This command is intended to be used to verify PoSt compute performance. -It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.`, - ArgsUsage: "[deadline index]", - Flags: []cli.Flag{ - &cli.Uint64Flag{ - Name: "deadline", - Usage: "deadline to compute WindowPoSt for ", - Value: 0, - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - &cli.StringFlag{ - Name: "storage-json", - Usage: "path to json file containing storage config", - Value: "~/.curio/storage.json", - }, - &cli.Uint64Flag{ - Name: "partition", - Usage: "partition to compute WindowPoSt for", - Value: 0, - }, - }, - Action: func(cctx *cli.Context) error { - - ctx := context.Background() - deps, err := deps.GetDeps(ctx, cctx) - if err != nil { - return err - } - - wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := curio.WindowPostScheduler( - ctx, deps.Cfg.Fees, deps.Cfg.Proving, deps.Full, deps.Verif, deps.LW, nil, nil, - deps.As, deps.Maddrs, deps.DB, deps.Stor, deps.Si, deps.Cfg.Subsystems.WindowPostMaxTasks) - if err != nil { - return err - } - _, _ = wdPoStSubmitTask, derlareRecoverTask - - if len(deps.Maddrs) == 0 { - return errors.New("no miners to compute WindowPoSt for") - } - head, err := deps.Full.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("failed to get chain head: %w", err) - } - - di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0) - - for maddr := range deps.Maddrs { - out, err := wdPostTask.DoPartition(ctx, head, address.Address(maddr), di, cctx.Uint64("partition")) - if err != nil { - fmt.Println("Error 
computing WindowPoSt for miner", maddr, err) - continue - } - fmt.Println("Computed WindowPoSt for miner", maddr, ":") - err = json.NewEncoder(os.Stdout).Encode(out) - if err != nil { - fmt.Println("Could not encode WindowPoSt output for miner", maddr, err) - continue - } - } - - return nil - }, -} diff --git a/cmd/curio/rpc/rpc.go b/cmd/curio/rpc/rpc.go deleted file mode 100644 index 1b2bb25e643..00000000000 --- a/cmd/curio/rpc/rpc.go +++ /dev/null @@ -1,325 +0,0 @@ -// Package rpc provides all direct access to this node. -package rpc - -import ( - "context" - "encoding/base64" - "encoding/json" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "time" - - "github.com/gbrlsnchs/jwt/v3" - "github.com/google/uuid" - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "go.opencensus.io/tag" - "golang.org/x/sync/errgroup" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/market" - "github.com/filecoin-project/lotus/curiosrc/web" - "github.com/filecoin-project/lotus/lib/rpcenc" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/metrics/proxy" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -const metaFile = "sectorstore.json" - -var log = logging.Logger("curio/rpc") -var permissioned = os.Getenv("LOTUS_DISABLE_AUTH_PERMISSIONED") != "1" - -func CurioHandler( - authv func(ctx context.Context, token string) ([]auth.Permission, 
error), - remote http.HandlerFunc, - a api.Curio, - permissioned bool) http.Handler { - mux := mux.NewRouter() - readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() - rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt) - - wapi := proxy.MetricedAPI[api.Curio, api.CurioStruct](a) - if permissioned { - wapi = api.PermissionedAPI[api.Curio, api.CurioStruct](wapi) - } - - rpcServer.Register("Filecoin", wapi) - rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover") - - mux.Handle("/rpc/v0", rpcServer) - mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) - mux.PathPrefix("/remote").HandlerFunc(remote) - mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - - if !permissioned { - return mux - } - - ah := &auth.Handler{ - Verify: authv, - Next: mux.ServeHTTP, - } - return ah -} - -type CurioAPI struct { - *deps.Deps - paths.SectorIndex - ShutdownChan chan struct{} -} - -func (p *CurioAPI) Version(context.Context) (api.Version, error) { - return api.CurioAPIVersion0, nil -} -func (p *CurioAPI) StorageDetachLocal(ctx context.Context, path string) error { - path, err := homedir.Expand(path) - if err != nil { - return xerrors.Errorf("expanding local path: %w", err) - } - - // check that we have the path opened - lps, err := p.LocalStore.Local(ctx) - if err != nil { - return xerrors.Errorf("getting local path list: %w", err) - } - - var localPath *storiface.StoragePath - for _, lp := range lps { - if lp.LocalPath == path { - lp := lp // copy to make the linter happy - localPath = &lp - break - } - } - if localPath == nil { - return xerrors.Errorf("no local paths match '%s'", path) - } - - // drop from the persisted storage.json - var found bool - if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) { - out := make([]storiface.LocalPath, 0, len(sc.StoragePaths)) - for _, storagePath := range sc.StoragePaths { - if storagePath.Path != path { - out = append(out, storagePath) - continue - } - found = 
true - } - sc.StoragePaths = out - }); err != nil { - return xerrors.Errorf("set storage config: %w", err) - } - if !found { - // maybe this is fine? - return xerrors.Errorf("path not found in storage.json") - } - - // unregister locally, drop from sector index - return p.LocalStore.ClosePath(ctx, localPath.ID) -} - -func (p *CurioAPI) StorageLocal(ctx context.Context) (map[storiface.ID]string, error) { - ps, err := p.LocalStore.Local(ctx) - if err != nil { - return nil, err - } - - var out = make(map[storiface.ID]string) - for _, path := range ps { - out[path.ID] = path.LocalPath - } - - return out, nil -} - -func (p *CurioAPI) StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) { - return p.Stor.FsStat(ctx, id) -} - -func (p *CurioAPI) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - di := market.NewPieceIngester(p.Deps.DB, p.Deps.Full) - - return di.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header) -} - -// Trigger shutdown -func (p *CurioAPI) Shutdown(context.Context) error { - close(p.ShutdownChan) - return nil -} - -func (p *CurioAPI) StorageInit(ctx context.Context, path string, opts storiface.LocalStorageMeta) error { - path, err := homedir.Expand(path) - if err != nil { - return xerrors.Errorf("expanding local path: %w", err) - } - - if err := os.MkdirAll(path, 0755); err != nil { - if !os.IsExist(err) { - return err - } - } - _, err = os.Stat(filepath.Join(path, metaFile)) - if !os.IsNotExist(err) { - if err == nil { - return xerrors.Errorf("path is already initialized") - } - return err - } - if opts.ID == "" { - opts.ID = storiface.ID(uuid.New().String()) - } - if !(opts.CanStore || opts.CanSeal) { - return xerrors.Errorf("must specify at least one of --store or --seal") - } - b, err := json.MarshalIndent(opts, "", " ") - if err != nil { - return xerrors.Errorf("marshaling storage config: %w", 
err) - } - if err := os.WriteFile(filepath.Join(path, metaFile), b, 0644); err != nil { - return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(path, metaFile), err) - } - return nil -} - -func (p *CurioAPI) StorageAddLocal(ctx context.Context, path string) error { - path, err := homedir.Expand(path) - if err != nil { - return xerrors.Errorf("expanding local path: %w", err) - } - - if err := p.LocalStore.OpenPath(ctx, path); err != nil { - return xerrors.Errorf("opening local path: %w", err) - } - - if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) { - sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: path}) - }); err != nil { - return xerrors.Errorf("get storage config: %w", err) - } - - return nil -} - -func (p *CurioAPI) LogList(ctx context.Context) ([]string, error) { - return logging.GetSubsystems(), nil -} - -func (p *CurioAPI) LogSetLevel(ctx context.Context, subsystem, level string) error { - return logging.SetLogLevel(subsystem, level) -} - -func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan chan struct{}) error { - fh := &paths.FetchHandler{Local: dependencies.LocalStore, PfHandler: &paths.DefaultPartialFileHandler{}} - remoteHandler := func(w http.ResponseWriter, r *http.Request) { - if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"}) - return - } - - fh.ServeHTTP(w, r) - } - - var authVerify func(context.Context, string) ([]auth.Permission, error) - { - privateKey, err := base64.StdEncoding.DecodeString(dependencies.Cfg.Apis.StorageRPCSecret) - if err != nil { - return xerrors.Errorf("decoding storage rpc secret: %w", err) - } - authVerify = func(ctx context.Context, token string) ([]auth.Permission, error) { - var payload deps.JwtPayload - if _, err := jwt.Verify([]byte(token), jwt.NewHS256(privateKey), &payload); err != nil { - return nil, 
xerrors.Errorf("JWT Verification failed: %w", err) - } - - return payload.Allow, nil - } - } - // Serve the RPC. - srv := &http.Server{ - Handler: CurioHandler( - authVerify, - remoteHandler, - &CurioAPI{dependencies, dependencies.Si, shutdownChan}, - permissioned), - ReadHeaderTimeout: time.Minute * 3, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker")) - return ctx - }, - Addr: dependencies.ListenAddr, - } - - log.Infof("Setting up RPC server at %s", dependencies.ListenAddr) - eg := errgroup.Group{} - eg.Go(srv.ListenAndServe) - - if dependencies.Cfg.Subsystems.EnableWebGui { - web, err := web.GetSrv(ctx, dependencies) - if err != nil { - return err - } - - go func() { - <-ctx.Done() - log.Warn("Shutting down...") - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - if err := web.Shutdown(context.Background()); err != nil { - log.Errorf("shutting down web server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() - - uiAddress := dependencies.Cfg.Subsystems.GuiAddress - if uiAddress == "" || uiAddress[0] == ':' { - uiAddress = "localhost" + uiAddress - } - log.Infof("GUI: http://%s", uiAddress) - eg.Go(web.ListenAndServe) - } - return eg.Wait() -} - -func GetCurioAPI(ctx *cli.Context) (api.Curio, jsonrpc.ClientCloser, error) { - addr, headers, err := cliutil.GetRawAPI(ctx, repo.Curio, "v0") - if err != nil { - return nil, nil, err - } - - u, err := url.Parse(addr) - if err != nil { - return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - } - - addr = u.String() - - return client.NewCurioRpc(ctx.Context, addr, headers) -} diff --git a/cmd/curio/run.go b/cmd/curio/run.go deleted file mode 100644 index cacacfc0fee..00000000000 --- a/cmd/curio/run.go +++ /dev/null @@ -1,196 +0,0 
@@ -package main - -import ( - "context" - "fmt" - "os" - "strings" - - "github.com/pkg/errors" - "github.com/urfave/cli/v2" - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/build" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/rpc" - "github.com/filecoin-project/lotus/cmd/curio/tasks" - "github.com/filecoin-project/lotus/curiosrc/market/lmrpc" - "github.com/filecoin-project/lotus/lib/ulimit" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/node" -) - -type stackTracer interface { - StackTrace() errors.StackTrace -} - -var runCmd = &cli.Command{ - Name: "run", - Usage: "Start a Curio process", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "listen", - Usage: "host address and port the worker api will listen on", - Value: "0.0.0.0:12300", - EnvVars: []string{"LOTUS_WORKER_LISTEN"}, - }, - &cli.BoolFlag{ - Name: "nosync", - Usage: "don't check full-node sync status", - }, - &cli.BoolFlag{ - Name: "halt-after-init", - Usage: "only run init, then return", - Hidden: true, - }, - &cli.BoolFlag{ - Name: "manage-fdlimit", - Usage: "manage open file limit", - Value: true, - }, - &cli.StringFlag{ - Name: "storage-json", - Usage: "path to json file containing storage config", - Value: "~/.curio/storage.json", - }, - &cli.StringFlag{ - Name: "journal", - Usage: "path to journal files", - Value: "~/.curio/", - }, - &cli.StringSliceFlag{ - Name: "layers", - Aliases: []string{"l", "layer"}, - Usage: "list of layers to be interpreted (atop defaults). 
Default: base", - }, - }, - Action: func(cctx *cli.Context) (err error) { - defer func() { - if err != nil { - if err, ok := err.(stackTracer); ok { - for _, f := range err.StackTrace() { - fmt.Printf("%+s:%d\n", f, f) - } - } - } - }() - if !cctx.Bool("enable-gpu-proving") { - err := os.Setenv("BELLMAN_NO_GPU", "true") - if err != nil { - return err - } - } - - if err := os.MkdirAll(os.TempDir(), 0755); err != nil { - log.Errorf("ensuring tempdir exists: %s", err) - } - - ctx, _ := tag.New(lcli.DaemonContext(cctx), - tag.Insert(metrics.Version, build.BuildVersion), - tag.Insert(metrics.Commit, build.CurrentCommit), - tag.Insert(metrics.NodeType, "curio"), - ) - shutdownChan := make(chan struct{}) - { - var ctxclose func() - ctx, ctxclose = context.WithCancel(ctx) - go func() { - <-shutdownChan - ctxclose() - }() - } - // Register all metric views - /* - if err := view.Register( - metrics.MinerNodeViews..., - ); err != nil { - log.Fatalf("Cannot register the view: %v", err) - } - */ - // Set the metric to one so it is published to the exporter - stats.Record(ctx, metrics.LotusInfo.M(1)) - - if cctx.Bool("manage-fdlimit") { - if _, _, err := ulimit.ManageFdLimit(); err != nil { - log.Errorf("setting file descriptor limit: %s", err) - } - } - - dependencies := &deps.Deps{} - err = dependencies.PopulateRemainingDeps(ctx, cctx, true) - if err != nil { - return err - } - - taskEngine, err := tasks.StartTasks(ctx, dependencies) - - if err != nil { - return nil - } - defer taskEngine.GracefullyTerminate() - - if err := lmrpc.ServeCurioMarketRPCFromConfig(dependencies.DB, dependencies.Full, dependencies.Cfg); err != nil { - return xerrors.Errorf("starting market RPCs: %w", err) - } - - err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown. 
- if err != nil { - return err - } - - finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, - //node.ShutdownHandler{Component: "curio", StopFunc: stop}, - - <-finishCh - return nil - }, -} - -var webCmd = &cli.Command{ - Name: "web", - Usage: "Start Curio web interface", - Description: `Start an instance of Curio web interface. - This creates the 'web' layer if it does not exist, then calls run with that layer.`, - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "listen", - Usage: "Address to listen on", - Value: "127.0.0.1:4701", - }, - &cli.BoolFlag{ - Name: "nosync", - Usage: "don't check full-node sync status", - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - webtxt, err := getConfig(db, "web") - if err != nil || webtxt == "" { - - s := `[Susbystems] - EnableWebGui = true - ` - if err = setConfig(db, "web", s); err != nil { - return err - } - } - layers := append([]string{"web"}, cctx.StringSlice("layers")...) 
- err = cctx.Set("layers", strings.Join(layers, ",")) - if err != nil { - return err - } - return runCmd.Action(cctx) - }, -} diff --git a/cmd/curio/stop.go b/cmd/curio/stop.go deleted file mode 100644 index eb61a34fa4e..00000000000 --- a/cmd/curio/stop.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - _ "net/http/pprof" - - "github.com/urfave/cli/v2" - - lcli "github.com/filecoin-project/lotus/cli" -) - -var stopCmd = &cli.Command{ - Name: "stop", - Usage: "Stop a running Curio process", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - - api, closer, err := lcli.GetAPI(cctx) - if err != nil { - return err - } - defer closer() - - err = api.Shutdown(lcli.ReqContext(cctx)) - if err != nil { - return err - } - - return nil - }, -} diff --git a/cmd/curio/storage.go b/cmd/curio/storage.go deleted file mode 100644 index 2fa6d2d5291..00000000000 --- a/cmd/curio/storage.go +++ /dev/null @@ -1,499 +0,0 @@ -package main - -import ( - "fmt" - "math/bits" - "sort" - "strconv" - "strings" - "time" - - "github.com/docker/go-units" - "github.com/fatih/color" - "github.com/google/uuid" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/rpc" - "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var storageCmd = &cli.Command{ - Name: "storage", - Usage: "manage sector storage", - Description: `Sectors can be stored across many filesystem paths. 
These -commands provide ways to manage the storage the miner will used to store sectors -long term for proving (references as 'store') as well as how sectors will be -stored while moving through the sealing pipeline (references as 'seal').`, - Subcommands: []*cli.Command{ - storageAttachCmd, - storageDetachCmd, - storageListCmd, - storageFindCmd, - /*storageDetachCmd, - storageRedeclareCmd, - storageCleanupCmd, - storageLocks,*/ - }, -} - -var storageAttachCmd = &cli.Command{ - Name: "attach", - Usage: "attach local storage path", - ArgsUsage: "[path]", - Description: `Storage can be attached to the miner using this command. The storage volume -list is stored local to the miner in storage.json set in curio run. We do not -recommend manually modifying this value without further understanding of the -storage system. - -Each storage volume contains a configuration file which describes the -capabilities of the volume. When the '--init' flag is provided, this file will -be created using the additional flags. 
- -Weight -A high weight value means data will be more likely to be stored in this path - -Seal -Data for the sealing process will be stored here - -Store -Finalized sectors that will be moved here for long term storage and be proven -over time - `, - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "init", - Usage: "initialize the path first", - }, - &cli.Uint64Flag{ - Name: "weight", - Usage: "(for init) path weight", - Value: 10, - }, - &cli.BoolFlag{ - Name: "seal", - Usage: "(for init) use path for sealing", - }, - &cli.BoolFlag{ - Name: "store", - Usage: "(for init) use path for long-term storage", - }, - &cli.StringFlag{ - Name: "max-storage", - Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", - }, - &cli.StringSliceFlag{ - Name: "groups", - Usage: "path group names", - }, - &cli.StringSliceFlag{ - Name: "allow-to", - Usage: "path groups allowed to pull data from this path (allow all if not specified)", - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - p, err := homedir.Expand(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("expanding path: %w", err) - } - - if cctx.Bool("init") { - var maxStor int64 - if cctx.IsSet("max-storage") { - maxStor, err = units.RAMInBytes(cctx.String("max-storage")) - if err != nil { - return xerrors.Errorf("parsing max-storage: %w", err) - } - } - - cfg := storiface.LocalStorageMeta{ - ID: storiface.ID(uuid.New().String()), - Weight: cctx.Uint64("weight"), - CanSeal: cctx.Bool("seal"), - CanStore: cctx.Bool("store"), - MaxStorage: uint64(maxStor), - Groups: cctx.StringSlice("groups"), - AllowTo: cctx.StringSlice("allow-to"), - } - - if !(cfg.CanStore || cfg.CanSeal) { - return xerrors.Errorf("must specify at least one of --store or --seal") - } - - if err := 
minerApi.StorageInit(ctx, p, cfg); err != nil { - return xerrors.Errorf("init storage: %w", err) - } - } - - return minerApi.StorageAddLocal(ctx, p) - }, -} - -var storageDetachCmd = &cli.Command{ - Name: "detach", - Usage: "detach local storage path", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - }, - }, - ArgsUsage: "[path]", - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - p, err := homedir.Expand(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("expanding path: %w", err) - } - - if !cctx.Bool("really-do-it") { - return xerrors.Errorf("pass --really-do-it to execute the action") - } - - return minerApi.StorageDetachLocal(ctx, p) - }, -} - -var storageListCmd = &cli.Command{ - Name: "list", - Usage: "list local storage paths", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "local", - Usage: "only list local storage paths", - }, - }, - Subcommands: []*cli.Command{ - //storageListSectorsCmd, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - st, err := minerApi.StorageList(ctx) - if err != nil { - return err - } - - local, err := minerApi.StorageLocal(ctx) - if err != nil { - return err - } - - type fsInfo struct { - storiface.ID - sectors []storiface.Decl - stat fsutil.FsStat - } - - sorted := make([]fsInfo, 0, len(st)) - for id, decls := range st { - if cctx.Bool("local") { - if _, ok := local[id]; !ok { - continue - } - } - - st, err := minerApi.StorageStat(ctx, id) - if err != nil { - sorted = append(sorted, fsInfo{ID: id, sectors: decls}) - continue - } - - sorted = append(sorted, fsInfo{id, decls, st}) - } - - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].stat.Capacity != sorted[j].stat.Capacity { 
- return sorted[i].stat.Capacity > sorted[j].stat.Capacity - } - return sorted[i].ID < sorted[j].ID - }) - - for _, s := range sorted { - - var cnt [5]int - for _, decl := range s.sectors { - for i := range cnt { - if decl.SectorFileType&(1< 98: - percCol = color.FgRed - case usedPercent > 90: - percCol = color.FgYellow - } - - set := (st.Capacity - st.FSAvailable) * barCols / st.Capacity - used := (st.Capacity - (st.FSAvailable + st.Reserved)) * barCols / st.Capacity - reserved := set - used - bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) - - desc := "" - if st.Max > 0 { - desc = " (filesystem)" - } - - fmt.Printf("\t[%s] %s/%s %s%s\n", color.New(percCol).Sprint(bar), - types.SizeStr(types.NewInt(uint64(st.Capacity-st.FSAvailable))), - types.SizeStr(types.NewInt(uint64(st.Capacity))), - color.New(percCol).Sprintf("%d%%", usedPercent), desc) - } - - // optional configured limit bar - if st.Max > 0 { - usedPercent := st.Used * 100 / st.Max - - percCol := color.FgGreen - switch { - case usedPercent > 98: - percCol = color.FgRed - case usedPercent > 90: - percCol = color.FgYellow - } - - set := st.Used * barCols / st.Max - used := (st.Used + st.Reserved) * barCols / st.Max - reserved := set - used - bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) - - fmt.Printf("\t[%s] %s/%s %s (limit)\n", color.New(percCol).Sprint(bar), - types.SizeStr(types.NewInt(uint64(st.Used))), - types.SizeStr(types.NewInt(uint64(st.Max))), - color.New(percCol).Sprintf("%d%%", usedPercent)) - } - - fmt.Printf("\t%s; %s; %s; %s; %s; Reserved: %s\n", - color.YellowString("Unsealed: %d", cnt[0]), - color.GreenString("Sealed: %d", cnt[1]), - color.BlueString("Caches: %d", cnt[2]), - color.GreenString("Updated: %d", cnt[3]), - color.BlueString("Update-caches: %d", cnt[4]), - types.SizeStr(types.NewInt(uint64(st.Reserved)))) - - si, err := minerApi.StorageInfo(ctx, s.ID) - if err != nil { - 
return err - } - - fmt.Print("\t") - if si.CanSeal || si.CanStore { - fmt.Printf("Weight: %d; Use: ", si.Weight) - if si.CanSeal { - fmt.Print(color.MagentaString("Seal ")) - } - if si.CanStore { - fmt.Print(color.CyanString("Store")) - } - } else { - fmt.Print(color.HiYellowString("Use: ReadOnly")) - } - fmt.Println() - - if len(si.Groups) > 0 { - fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", ")) - } - if len(si.AllowTo) > 0 { - fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", ")) - } - - if len(si.AllowTypes) > 0 || len(si.DenyTypes) > 0 { - denied := storiface.FTAll.SubAllowed(si.AllowTypes, si.DenyTypes) - allowed := storiface.FTAll ^ denied - - switch { - case bits.OnesCount64(uint64(allowed)) == 0: - fmt.Printf("\tAllow Types: %s\n", color.RedString("None")) - case bits.OnesCount64(uint64(allowed)) < bits.OnesCount64(uint64(denied)): - fmt.Printf("\tAllow Types: %s\n", color.GreenString(strings.Join(allowed.Strings(), " "))) - default: - fmt.Printf("\tDeny Types: %s\n", color.RedString(strings.Join(denied.Strings(), " "))) - } - } - - if localPath, ok := local[s.ID]; ok { - fmt.Printf("\tLocal: %s\n", color.GreenString(localPath)) - } - for i, l := range si.URLs { - var rtt string - if _, ok := local[s.ID]; !ok && i == 0 { - rtt = " (latency: " + ping.Truncate(time.Microsecond*100).String() + ")" - } - - fmt.Printf("\tURL: %s%s\n", l, rtt) // TODO; try pinging maybe?? print latency? 
- } - fmt.Println() - } - - return nil - }, -} - -type storedSector struct { - id storiface.ID - store storiface.SectorStorageInfo - types map[storiface.SectorFileType]bool -} - -var storageFindCmd = &cli.Command{ - Name: "find", - Usage: "find sector in the storage system", - ArgsUsage: "[miner address] [sector number]", - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 2 { - return lcli.IncorrectNumArgs(cctx) - } - - maddr := cctx.Args().First() - ma, err := address.NewFromString(maddr) - if err != nil { - return xerrors.Errorf("parsing miner address: %w", err) - } - - mid, err := address.IDFromAddress(ma) - if err != nil { - return err - } - - if !cctx.Args().Present() { - return xerrors.New("Usage: lotus-miner storage find [sector number]") - } - - snum, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) - if err != nil { - return err - } - - sid := abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(snum), - } - - sectorTypes := []storiface.SectorFileType{ - storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache, - } - - byId := make(map[storiface.ID]*storedSector) - for _, sectorType := range sectorTypes { - infos, err := minerApi.StorageFindSector(ctx, sid, sectorType, 0, false) - if err != nil { - return xerrors.Errorf("finding sector type %d: %w", sectorType, err) - } - - for _, info := range infos { - sts, ok := byId[info.ID] - if !ok { - sts = &storedSector{ - id: info.ID, - store: info, - types: make(map[storiface.SectorFileType]bool), - } - byId[info.ID] = sts - } - sts.types[sectorType] = true - } - } - - local, err := minerApi.StorageLocal(ctx) - if err != nil { - return err - } - - var out []*storedSector - for _, sector := range byId { - out = append(out, sector) - } - sort.Slice(out, func(i, j int) bool { - return out[i].id < out[j].id - 
}) - - for _, info := range out { - var types []string - for sectorType, present := range info.types { - if present { - types = append(types, sectorType.String()) - } - } - sort.Strings(types) // Optional: Sort types for consistent output - fmt.Printf("In %s (%s)\n", info.id, strings.Join(types, ", ")) - fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore) - if localPath, ok := local[info.id]; ok { - fmt.Printf("\tLocal (%s)\n", localPath) - } else { - fmt.Printf("\tRemote\n") - } - for _, l := range info.store.URLs { - fmt.Printf("\tURL: %s\n", l) - } - } - - return nil - }, -} diff --git a/cmd/curio/tasks/tasks.go b/cmd/curio/tasks/tasks.go deleted file mode 100644 index 71923018d9e..00000000000 --- a/cmd/curio/tasks/tasks.go +++ /dev/null @@ -1,240 +0,0 @@ -// Package tasks contains tasks that can be run by the curio command. -package tasks - -import ( - "context" - "sort" - "strings" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "golang.org/x/exp/maps" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - curio "github.com/filecoin-project/lotus/curiosrc" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/curiosrc/gc" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/piece" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/curiosrc/winning" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/lazy" - "github.com/filecoin-project/lotus/lib/must" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -var log = logging.Logger("curio/deps") - -func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.TaskEngine, error) { - cfg 
:= dependencies.Cfg - db := dependencies.DB - full := dependencies.Full - verif := dependencies.Verif - lw := dependencies.LW - as := dependencies.As - maddrs := dependencies.Maddrs - stor := dependencies.Stor - lstor := dependencies.LocalStore - si := dependencies.Si - var activeTasks []harmonytask.TaskInterface - - sender, sendTask := message.NewSender(full, full, db) - activeTasks = append(activeTasks, sendTask) - - chainSched := chainsched.New(full) - - var needProofParams bool - - /////////////////////////////////////////////////////////////////////// - ///// Task Selection - /////////////////////////////////////////////////////////////////////// - { - // PoSt - - if cfg.Subsystems.EnableWindowPost { - wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := curio.WindowPostScheduler( - ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender, chainSched, - as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks) - - if err != nil { - return nil, err - } - activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, derlareRecoverTask) - needProofParams = true - } - - if cfg.Subsystems.EnableWinningPost { - winPoStTask := winning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, lw, verif, full, maddrs) - activeTasks = append(activeTasks, winPoStTask) - needProofParams = true - } - } - - slrLazy := lazy.MakeLazy(func() (*ffi.SealCalls, error) { - return ffi.NewSealCalls(stor, lstor, si), nil - }) - - { - // Piece handling - if cfg.Subsystems.EnableParkPiece { - parkPieceTask := piece.NewParkPieceTask(db, must.One(slrLazy.Val()), cfg.Subsystems.ParkPieceMaxTasks) - cleanupPieceTask := piece.NewCleanupPieceTask(db, must.One(slrLazy.Val()), 0) - activeTasks = append(activeTasks, parkPieceTask, cleanupPieceTask) - } - } - - hasAnySealingTask := cfg.Subsystems.EnableSealSDR || - cfg.Subsystems.EnableSealSDRTrees || - cfg.Subsystems.EnableSendPrecommitMsg || - cfg.Subsystems.EnablePoRepProof || - cfg.Subsystems.EnableMoveStorage || - 
cfg.Subsystems.EnableSendCommitMsg - { - // Sealing - - var sp *seal.SealPoller - var slr *ffi.SealCalls - if hasAnySealingTask { - sp = seal.NewPoller(db, full) - go sp.RunPoller(ctx) - - slr = must.One(slrLazy.Val()) - } - - // NOTE: Tasks with the LEAST priority are at the top - if cfg.Subsystems.EnableSealSDR { - sdrTask := seal.NewSDRTask(full, db, sp, slr, cfg.Subsystems.SealSDRMaxTasks) - activeTasks = append(activeTasks, sdrTask) - } - if cfg.Subsystems.EnableSealSDRTrees { - treesTask := seal.NewTreesTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks) - finalizeTask := seal.NewFinalizeTask(cfg.Subsystems.FinalizeMaxTasks, sp, slr, db) - activeTasks = append(activeTasks, treesTask, finalizeTask) - } - if cfg.Subsystems.EnableSendPrecommitMsg { - precommitTask := seal.NewSubmitPrecommitTask(sp, db, full, sender, as, cfg.Fees.MaxPreCommitGasFee) - activeTasks = append(activeTasks, precommitTask) - } - if cfg.Subsystems.EnablePoRepProof { - porepTask := seal.NewPoRepTask(db, full, sp, slr, cfg.Subsystems.PoRepProofMaxTasks) - activeTasks = append(activeTasks, porepTask) - needProofParams = true - } - if cfg.Subsystems.EnableMoveStorage { - moveStorageTask := seal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks) - activeTasks = append(activeTasks, moveStorageTask) - } - if cfg.Subsystems.EnableSendCommitMsg { - commitTask := seal.NewSubmitCommitTask(sp, db, full, sender, as, cfg.Fees.MaxCommitGasFee) - activeTasks = append(activeTasks, commitTask) - } - } - - if hasAnySealingTask { - // Sealing nodes maintain storage index when bored - storageEndpointGcTask := gc.NewStorageEndpointGC(si, stor, db) - activeTasks = append(activeTasks, storageEndpointGcTask) - } - - if needProofParams { - for spt := range dependencies.ProofTypes { - if err := modules.GetParams(true)(spt); err != nil { - return nil, xerrors.Errorf("getting params: %w", err) - } - } - } - - minerAddresses := make([]string, 0, len(maddrs)) - for k := range maddrs { - 
minerAddresses = append(minerAddresses, address.Address(k).String()) - } - - log.Infow("This Curio instance handles", - "miner_addresses", minerAddresses, - "tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name })) - - // harmony treats the first task as highest priority, so reverse the order - // (we could have just appended to this list in the reverse order, but defining - // tasks in pipeline order is more intuitive) - activeTasks = lo.Reverse(activeTasks) - - ht, err := harmonytask.New(db, activeTasks, dependencies.ListenAddr) - if err != nil { - return nil, err - } - go machineDetails(dependencies, activeTasks, ht.ResourcesAvailable().MachineID) - - if hasAnySealingTask { - watcher, err := message.NewMessageWatcher(db, ht, chainSched, full) - if err != nil { - return nil, err - } - _ = watcher - } - - if cfg.Subsystems.EnableWindowPost || hasAnySealingTask { - go chainSched.Run(ctx) - } - - return ht, nil -} - -func machineDetails(deps *deps.Deps, activeTasks []harmonytask.TaskInterface, machineID int) { - taskNames := lo.Map(activeTasks, func(item harmonytask.TaskInterface, _ int) string { - return item.TypeDetails().Name - }) - - miners := lo.Map(maps.Keys(deps.Maddrs), func(item dtypes.MinerAddress, _ int) string { - return address.Address(item).String() - }) - sort.Strings(miners) - - _, err := deps.DB.Exec(context.Background(), `INSERT INTO harmony_machine_details - (tasks, layers, startup_time, miners, machine_id) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (machine_id) DO UPDATE SET tasks=$1, layers=$2, startup_time=$3, miners=$4`, - strings.Join(taskNames, ","), strings.Join(deps.Layers, ","), - time.Now(), strings.Join(miners, ","), machineID) - - if err != nil { - log.Errorf("failed to update machine details: %s", err) - return - } - - // maybePostWarning - if !lo.Contains(taskNames, "WdPost") && !lo.Contains(taskNames, "WinPost") { - // Maybe we aren't running a PoSt for these miners? 
- var allMachines []struct { - MachineID int `db:"machine_id"` - Miners string `db:"miners"` - Tasks string `db:"tasks"` - } - err := deps.DB.Select(context.Background(), &allMachines, `SELECT machine_id, miners, tasks FROM harmony_machine_details`) - if err != nil { - log.Errorf("failed to get machine details: %s", err) - return - } - - for _, miner := range miners { - var myPostIsHandled bool - for _, m := range allMachines { - if !lo.Contains(strings.Split(m.Miners, ","), miner) { - continue - } - if lo.Contains(strings.Split(m.Tasks, ","), "WdPost") && lo.Contains(strings.Split(m.Tasks, ","), "WinPost") { - myPostIsHandled = true - break - } - } - if !myPostIsHandled { - log.Errorf("No PoSt tasks are running for miner %s. Start handling PoSts immediately with:\n\tcurio run --layers=\"post\" ", miner) - } - } - } -} diff --git a/cmd/lotus-bench/cli.go b/cmd/lotus-bench/cli.go index 4379036d33a..526db8e22d5 100644 --- a/cmd/lotus-bench/cli.go +++ b/cmd/lotus-bench/cli.go @@ -304,9 +304,9 @@ func (c *CMD) Stop() { func (c *CMD) Report() { total := time.Since(c.start) - fmt.Fprintf(c.w, "[%s]:\n", c.cmd) - fmt.Fprintf(c.w, "- Options:\n") - fmt.Fprintf(c.w, " - concurrency: %d\n", c.concurrency) - fmt.Fprintf(c.w, " - qps: %d\n", c.qps) + fmt.Fprintf(c.w, "[%s]:\n", c.cmd) //nolint:errcheck + fmt.Fprintf(c.w, "- Options:\n") //nolint:errcheck + fmt.Fprintf(c.w, " - concurrency: %d\n", c.concurrency) //nolint:errcheck + fmt.Fprintf(c.w, " - qps: %d\n", c.qps) //nolint:errcheck c.reporter.Print(total, c.w) } diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 1a7a0d08792..8aa6768aabf 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -100,7 +100,7 @@ func main() { app := &cli.App{ Name: "lotus-bench", Usage: "Benchmark performance of lotus on your hardware", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), DisableSliceFlagSeparator: true, Commands: []*cli.Command{ proveCmd, diff --git 
a/cmd/lotus-bench/reporter.go b/cmd/lotus-bench/reporter.go index 7ade7b19d00..d33f94c7d6d 100644 --- a/cmd/lotus-bench/reporter.go +++ b/cmd/lotus-bench/reporter.go @@ -93,16 +93,16 @@ func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { totalLatency += latency } - fmt.Fprintf(w, "- Total Requests: %d\n", nrReq) - fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds()) - fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds()) - fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq) - fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2]) - fmt.Fprintf(w, "- Latency distribution:\n") + fmt.Fprintf(w, "- Total Requests: %d\n", nrReq) //nolint:errcheck + fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds()) //nolint:errcheck + fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds()) //nolint:errcheck + fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq) //nolint:errcheck + fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2]) //nolint:errcheck + fmt.Fprintf(w, "- Latency distribution:\n") //nolint:errcheck percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999} for _, p := range percentiles { idx := int64(p * float64(nrReq)) - fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx]) + _, _ = fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx]) } // create a simple histogram with 10 buckets spanning the range of latency @@ -135,19 +135,19 @@ func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { } // print the histogram using a tabwriter which will align the columns nicely - fmt.Fprintf(w, "- Histogram:\n") + _, _ = fmt.Fprintf(w, "- Histogram:\n") const padding = 2 tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug) for i := 0; i < nrBucket; i++ { ratio := float64(buckets[i].cnt) / float64(nrReq) bars := strings.Repeat("#", int(ratio*100)) - 
fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100)) + _, _ = fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100)) } tabWriter.Flush() //nolint:errcheck - fmt.Fprintf(w, "- Status codes:\n") + _, _ = fmt.Fprintf(w, "- Status codes:\n") for code, cnt := range r.statusCodes { - fmt.Fprintf(w, " [%d]: %d\n", code, cnt) + _, _ = fmt.Fprintf(w, " [%d]: %d\n", code, cnt) } // print the 10 most occurring errors (in case error values are not unique) @@ -163,12 +163,12 @@ func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { sort.Slice(sortedErrors, func(i, j int) bool { return sortedErrors[i].cnt > sortedErrors[j].cnt }) - fmt.Fprintf(w, "- Errors (top 10):\n") + _, _ = fmt.Fprintf(w, "- Errors (top 10):\n") for i, se := range sortedErrors { if i > 10 { break } - fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt) + _, _ = fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt) } } diff --git a/cmd/lotus-bench/rpc.go b/cmd/lotus-bench/rpc.go index 4af4bdb27ee..547d6fb4925 100644 --- a/cmd/lotus-bench/rpc.go +++ b/cmd/lotus-bench/rpc.go @@ -395,10 +395,10 @@ func (rpc *RPCMethod) Stop() { func (rpc *RPCMethod) Report() { total := time.Since(rpc.start) - fmt.Fprintf(rpc.w, "[%s]:\n", rpc.method) - fmt.Fprintf(rpc.w, "- Options:\n") - fmt.Fprintf(rpc.w, " - concurrency: %d\n", rpc.concurrency) - fmt.Fprintf(rpc.w, " - params: %s\n", rpc.params) - fmt.Fprintf(rpc.w, " - qps: %d\n", rpc.qps) + fmt.Fprintf(rpc.w, "[%s]:\n", rpc.method) //nolint:errcheck + fmt.Fprintf(rpc.w, "- Options:\n") //nolint:errcheck + fmt.Fprintf(rpc.w, " - concurrency: %d\n", rpc.concurrency) //nolint:errcheck + fmt.Fprintf(rpc.w, " - params: %s\n", rpc.params) //nolint:errcheck + fmt.Fprintf(rpc.w, " - qps: %d\n", rpc.qps) //nolint:errcheck rpc.reporter.Print(total, rpc.w) } diff --git a/cmd/lotus-fountain/main.go 
b/cmd/lotus-fountain/main.go index 36d5faf0c29..191a91da403 100644 --- a/cmd/lotus-fountain/main.go +++ b/cmd/lotus-fountain/main.go @@ -41,7 +41,7 @@ func main() { app := &cli.App{ Name: "lotus-fountain", Usage: "Devnet token distribution utility", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Flags: []cli.Flag{ &cli.StringFlag{ Name: "repo", diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go index 35a43e18b1e..2c5279ed448 100644 --- a/cmd/lotus-gateway/main.go +++ b/cmd/lotus-gateway/main.go @@ -40,7 +40,7 @@ func main() { app := &cli.App{ Name: "lotus-gateway", Usage: "Public API server for lotus", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Flags: []cli.Flag{ &cli.StringFlag{ Name: "repo", diff --git a/cmd/lotus-health/main.go b/cmd/lotus-health/main.go index 59c81e7c91a..8e58624b673 100644 --- a/cmd/lotus-health/main.go +++ b/cmd/lotus-health/main.go @@ -36,7 +36,7 @@ func main() { app := &cli.App{ Name: "lotus-health", Usage: "Tools for monitoring lotus daemon health", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Commands: local, Flags: []cli.Flag{ &cli.StringFlag{ diff --git a/cmd/lotus-miner/allinfo_test.go b/cmd/lotus-miner/allinfo_test.go index 2388f2f7aeb..a0735fc9771 100644 --- a/cmd/lotus-miner/allinfo_test.go +++ b/cmd/lotus-miner/allinfo_test.go @@ -2,7 +2,6 @@ package main import ( - "context" "flag" "testing" "time" @@ -43,11 +42,5 @@ func TestMinerAllInfo(t *testing.T) { t.Run("pre-info-all", run) - //stm: @CLIENT_DATA_IMPORT_001, @CLIENT_STORAGE_DEALS_GET_001 - dh := kit.NewDealHarness(t, client, miner, miner) - deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6}) - outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) - kit.AssertFilesEqual(t, inPath, outPath) - t.Run("post-info-all", run) } diff --git a/cmd/lotus-miner/dagstore.go b/cmd/lotus-miner/dagstore.go deleted file 
mode 100644 index c0e37f63bf0..00000000000 --- a/cmd/lotus-miner/dagstore.go +++ /dev/null @@ -1,282 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "github.com/fatih/color" - "github.com/ipfs/go-cid" - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/lotus/api" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/tablewriter" -) - -var dagstoreCmd = &cli.Command{ - Name: "dagstore", - Usage: "Manage the dagstore on the markets subsystem", - Subcommands: []*cli.Command{ - dagstoreListShardsCmd, - dagstoreRegisterShardCmd, - dagstoreInitializeShardCmd, - dagstoreRecoverShardCmd, - dagstoreInitializeAllCmd, - dagstoreGcCmd, - dagstoreLookupPiecesCmd, - }, -} - -var dagstoreListShardsCmd = &cli.Command{ - Name: "list-shards", - Usage: "List all shards known to the dagstore, with their current status", - Action: func(cctx *cli.Context) error { - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - shards, err := marketsApi.DagstoreListShards(ctx) - if err != nil { - return err - } - - return printTableShards(shards) - }, -} - -var dagstoreRegisterShardCmd = &cli.Command{ - Name: "register-shard", - ArgsUsage: "[key]", - Usage: "Register a shard", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - marketsAPI, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - shardKey := cctx.Args().First() - err = marketsAPI.DagstoreRegisterShard(ctx, shardKey) - if err != nil { - return err - } - - fmt.Println("Registered shard " + shardKey) - return nil - }, -} - -var dagstoreInitializeShardCmd = &cli.Command{ - Name: "initialize-shard", - ArgsUsage: "[key]", - Usage: "Initialize the specified shard", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return 
lcli.IncorrectNumArgs(cctx) - } - - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - return marketsApi.DagstoreInitializeShard(ctx, cctx.Args().First()) - }, -} - -var dagstoreRecoverShardCmd = &cli.Command{ - Name: "recover-shard", - ArgsUsage: "[key]", - Usage: "Attempt to recover a shard in errored state", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - return marketsApi.DagstoreRecoverShard(ctx, cctx.Args().First()) - }, -} - -var dagstoreInitializeAllCmd = &cli.Command{ - Name: "initialize-all", - Usage: "Initialize all uninitialized shards, streaming results as they're produced; only shards for unsealed pieces are initialized by default", - Flags: []cli.Flag{ - &cli.UintFlag{ - Name: "concurrency", - Usage: "maximum shards to initialize concurrently at a time; use 0 for unlimited", - Required: true, - }, - &cli.BoolFlag{ - Name: "include-sealed", - Usage: "initialize sealed pieces as well", - }, - }, - Action: func(cctx *cli.Context) error { - concurrency := cctx.Uint("concurrency") - sealed := cctx.Bool("sealed") - - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - params := api.DagstoreInitializeAllParams{ - MaxConcurrency: int(concurrency), - IncludeSealed: sealed, - } - - ch, err := marketsApi.DagstoreInitializeAll(ctx, params) - if err != nil { - return err - } - - for { - select { - case evt, ok := <-ch: - if !ok { - return nil - } - _, _ = fmt.Fprint(os.Stdout, color.New(color.BgHiBlack).Sprintf("(%d/%d)", evt.Current, evt.Total)) - _, _ = fmt.Fprint(os.Stdout, " ") - if evt.Event == "start" { - _, _ = fmt.Fprintln(os.Stdout, evt.Key, 
color.New(color.Reset).Sprint("STARTING")) - } else { - if evt.Success { - _, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgGreen).Sprint("SUCCESS")) - } else { - _, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgRed).Sprint("ERROR"), evt.Error) - } - } - - case <-ctx.Done(): - return fmt.Errorf("aborted") - } - } - }, -} - -var dagstoreGcCmd = &cli.Command{ - Name: "gc", - Usage: "Garbage collect the dagstore", - Action: func(cctx *cli.Context) error { - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - collected, err := marketsApi.DagstoreGC(ctx) - if err != nil { - return err - } - - if len(collected) == 0 { - _, _ = fmt.Fprintln(os.Stdout, "no shards collected") - return nil - } - - for _, e := range collected { - if e.Error == "" { - _, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgGreen).Sprint("SUCCESS")) - } else { - _, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgRed).Sprint("ERROR"), e.Error) - } - } - - return nil - }, -} - -func printTableShards(shards []api.DagstoreShardInfo) error { - if len(shards) == 0 { - return nil - } - - tw := tablewriter.New( - tablewriter.Col("Key"), - tablewriter.Col("State"), - tablewriter.Col("Error"), - ) - - colors := map[string]color.Attribute{ - "ShardStateAvailable": color.FgGreen, - "ShardStateServing": color.FgBlue, - "ShardStateErrored": color.FgRed, - "ShardStateNew": color.FgYellow, - } - - for _, s := range shards { - m := map[string]interface{}{ - "Key": s.Key, - "State": func() string { - trimmedState := strings.TrimPrefix(s.State, "ShardState") - if c, ok := colors[s.State]; ok { - return color.New(c).Sprint(trimmedState) - } - return trimmedState - }(), - "Error": s.Error, - } - tw.Write(m) - } - return tw.Flush(os.Stdout) -} - -var dagstoreLookupPiecesCmd = &cli.Command{ - Name: "lookup-pieces", - Usage: "Lookup pieces that a given CID belongs to", - ArgsUsage: "", - Action: func(cctx 
*cli.Context) error { - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - cidStr := cctx.Args().First() - cid, err := cid.Parse(cidStr) - if err != nil { - return fmt.Errorf("invalid CID: %w", err) - } - - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - shards, err := marketsApi.DagstoreLookupPieces(ctx, cid) - if err != nil { - return err - } - - return printTableShards(shards) - }, -} diff --git a/cmd/lotus-miner/index_provider.go b/cmd/lotus-miner/index_provider.go deleted file mode 100644 index 2b6838a4b3f..00000000000 --- a/cmd/lotus-miner/index_provider.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/ipfs/go-cid" - "github.com/urfave/cli/v2" - - lcli "github.com/filecoin-project/lotus/cli" -) - -var indexProvCmd = &cli.Command{ - Name: "index", - Usage: "Manage the index provider on the markets subsystem", - Subcommands: []*cli.Command{ - indexProvAnnounceCmd, - indexProvAnnounceAllCmd, - }, -} - -var indexProvAnnounceCmd = &cli.Command{ - Name: "announce", - ArgsUsage: "", - Usage: "Announce a deal to indexers so they can download its index", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - proposalCidStr := cctx.Args().First() - proposalCid, err := cid.Parse(proposalCidStr) - if err != nil { - return fmt.Errorf("invalid deal proposal CID: %w", err) - } - - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - return marketsApi.IndexerAnnounceDeal(ctx, proposalCid) - }, -} - -var indexProvAnnounceAllCmd = &cli.Command{ - Name: "announce-all", - Usage: "Announce all active deals to indexers so they can download the indices", - Action: func(cctx *cli.Context) error { - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() 
- - ctx := lcli.ReqContext(cctx) - - return marketsApi.IndexerAnnounceAllDeals(ctx) - }, -} diff --git a/cmd/lotus-miner/info_all.go b/cmd/lotus-miner/info_all.go index 5b83467a2f8..253d2befc67 100644 --- a/cmd/lotus-miner/info_all.go +++ b/cmd/lotus-miner/info_all.go @@ -112,72 +112,6 @@ var infoAllCmd = &cli.Command{ fmt.Println("ERROR: ", err) } - fmt.Println("\n#: Storage Ask") - if err := getAskCmd.Action(cctx); err != nil { - fmt.Println("ERROR: ", err) - } - - fmt.Println("\n#: Storage Deals") - { - fs := &flag.FlagSet{} - for _, f := range dealsListCmd.Flags { - if err := f.Apply(fs); err != nil { - fmt.Println("ERROR: ", err) - } - } - if err := fs.Parse([]string{"--verbose"}); err != nil { - fmt.Println("ERROR: ", err) - } - - if err := dealsListCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { - fmt.Println("ERROR: ", err) - } - } - - fmt.Println("\n#: Storage Deals JSON") - { - fs := &flag.FlagSet{} - for _, f := range dealsListCmd.Flags { - if err := f.Apply(fs); err != nil { - fmt.Println("ERROR: ", err) - } - } - if err := fs.Parse([]string{"--verbose", "--format=json"}); err != nil { - fmt.Println("ERROR: ", err) - } - - if err := dealsListCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { - fmt.Println("ERROR: ", err) - } - } - - fmt.Println("\n#: Data Transfers") - { - fs := &flag.FlagSet{} - for _, f := range transfersListCmd.Flags { - if err := f.Apply(fs); err != nil { - fmt.Println("ERROR: ", err) - } - } - if err := fs.Parse([]string{"--verbose", "--completed", "--show-failed"}); err != nil { - fmt.Println("ERROR: ", err) - } - - if err := transfersListCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { - fmt.Println("ERROR: ", err) - } - } - - fmt.Println("\n#: DAGStore shards") - if err := dagstoreListShardsCmd.Action(cctx); err != nil { - fmt.Println("ERROR: ", err) - } - - fmt.Println("\n#: Pending Batch Deals") - if err := dealsPendingPublish.Action(cctx); err != nil { - fmt.Println("ERROR: ", err) - } - 
fmt.Println("\n#: Pending Batch Terminations") if err := sectorsTerminatePendingCmd.Action(cctx); err != nil { fmt.Println("ERROR: ", err) @@ -217,11 +151,6 @@ var infoAllCmd = &cli.Command{ fmt.Println("ERROR: ", err) } - fmt.Println("\n#: Sector Refs") - if err := sectorsRefsCmd.Action(cctx); err != nil { - fmt.Println("ERROR: ", err) - } - // Very Very Verbose info fmt.Println("\n#: Per Sector Info") diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index 9ab4e8b05b0..621cb078e7e 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -129,7 +129,6 @@ var initCmd = &cli.Command{ }, Subcommands: []*cli.Command{ restoreCmd, - serviceCmd, }, Action: func(cctx *cli.Context) error { log.Info("Initializing lotus miner") diff --git a/cmd/lotus-miner/init_service.go b/cmd/lotus-miner/init_service.go deleted file mode 100644 index 876313941f9..00000000000 --- a/cmd/lotus-miner/init_service.go +++ /dev/null @@ -1,159 +0,0 @@ -package main - -import ( - "context" - "strings" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - lcli "github.com/filecoin-project/lotus/cli" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -const ( - MarketsService = "markets" -) - -var serviceCmd = &cli.Command{ - Name: "service", - Usage: "Initialize a lotus miner sub-service", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "config", - Usage: "config file (config.toml)", - Required: true, - }, - &cli.BoolFlag{ - Name: "nosync", - Usage: "don't check full-node sync status", - }, - &cli.StringSliceFlag{ - Name: "type", - Usage: "type of service to be enabled", - }, - 
&cli.StringFlag{ - Name: "api-sealer", - Usage: "sealer API info (lotus-miner auth api-info --perm=admin)", - }, - &cli.StringFlag{ - Name: "api-sector-index", - Usage: "sector Index API info (lotus-miner auth api-info --perm=admin)", - }, - }, - ArgsUsage: "[backupFile]", - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - log.Info("Initializing lotus miner service") - - es := EnabledServices(cctx.StringSlice("type")) - - if len(es) == 0 { - return xerrors.Errorf("at least one module must be enabled") - } - - // we should remove this as soon as we have more service types and not just `markets` - if !es.Contains(MarketsService) { - return xerrors.Errorf("markets module must be enabled") - } - - if !cctx.IsSet("api-sealer") { - return xerrors.Errorf("--api-sealer is required without the sealer module enabled") - } - if !cctx.IsSet("api-sector-index") { - return xerrors.Errorf("--api-sector-index is required without the sector storage module enabled") - } - - repoPath := cctx.String(FlagMarketsRepo) - if repoPath == "" { - return xerrors.Errorf("please provide Lotus markets repo path via flag %s", FlagMarketsRepo) - } - - if err := restore(ctx, cctx, repoPath, &storiface.StorageConfig{}, func(cfg *config.StorageMiner) error { - cfg.Subsystems.EnableMarkets = es.Contains(MarketsService) - cfg.Subsystems.EnableMining = false - cfg.Subsystems.EnableSealing = false - cfg.Subsystems.EnableSectorStorage = false - - if !cfg.Subsystems.EnableSealing { - ai, err := checkApiInfo(ctx, cctx.String("api-sealer")) - if err != nil { - return xerrors.Errorf("checking sealer API: %w", err) - } - cfg.Subsystems.SealerApiInfo = ai - } - - if !cfg.Subsystems.EnableSectorStorage { - ai, err := checkApiInfo(ctx, cctx.String("api-sector-index")) - if err != nil { - return xerrors.Errorf("checking sector index API: %w", err) - } - cfg.Subsystems.SectorIndexApiInfo = ai - } - - return nil - }, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi 
api.MinerInfo) error { - if es.Contains(MarketsService) { - log.Info("Configuring miner actor") - - if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero(), cctx.Uint64("confidence")); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - - return nil - }, -} - -type EnabledServices []string - -func (es EnabledServices) Contains(name string) bool { - for _, s := range es { - if s == name { - return true - } - } - return false -} - -func checkApiInfo(ctx context.Context, ai string) (string, error) { - ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=") - info := cliutil.ParseApiInfo(ai) - addr, err := info.DialArgs("v0") - if err != nil { - return "", xerrors.Errorf("could not get DialArgs: %w", err) - } - - log.Infof("Checking api version of %s", addr) - - api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) - if err != nil { - return "", err - } - defer closer() - - v, err := api.Version(ctx) - if err != nil { - return "", xerrors.Errorf("checking version: %w", err) - } - - if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) { - return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion) - } - - return ai, nil -} diff --git a/cmd/lotus-miner/main.go b/cmd/lotus-miner/main.go index 1fc7abfa8da..76b8c0deb05 100644 --- a/cmd/lotus-miner/main.go +++ b/cmd/lotus-miner/main.go @@ -23,8 +23,7 @@ import ( var log = logging.Logger("main") const ( - FlagMinerRepo = "miner-repo" - FlagMarketsRepo = "markets-repo" + FlagMinerRepo = "miner-repo" ) // TODO remove after deprecation period @@ -43,16 +42,10 @@ func main() { backupCmd, lcli.WithCategory("chain", actorCmd), lcli.WithCategory("chain", infoCmd), - lcli.WithCategory("market", setHidden(storageDealsCmd)), - lcli.WithCategory("market", setHidden(retrievalDealsCmd)), - lcli.WithCategory("market", setHidden(dataTransfersCmd)), - lcli.WithCategory("market", 
setHidden(dagstoreCmd)), - lcli.WithCategory("market", setHidden(indexProvCmd)), lcli.WithCategory("storage", sectorsCmd), lcli.WithCategory("storage", provingCmd), lcli.WithCategory("storage", storageCmd), lcli.WithCategory("storage", sealingCmd), - lcli.WithCategory("retrieval", setHidden(piecesCmd)), } jaeger := tracing.SetupJaegerTracing("lotus") @@ -83,25 +76,10 @@ func main() { } } - // adapt the Net* commands to always hit the node running the markets - // subsystem, as that is the only one that runs a libp2p node. - netCmd := *lcli.NetCmd // make a copy. - netCmd.Hidden = true - prev := netCmd.Before - netCmd.Before = func(c *cli.Context) error { - if prev != nil { - if err := prev(c); err != nil { - return err - } - } - c.App.Metadata["repoType"] = repo.Markets - return nil - } - app := &cli.App{ Name: "lotus-miner", Usage: "Filecoin decentralized storage network miner", - Version: build.UserVersion(), + Version: string(build.MinerUserVersion()), EnableBashCompletion: true, Flags: []cli.Flag{ &cli.StringFlag{ @@ -135,31 +113,13 @@ func main() { Value: "~/.lotusminer", // TODO: Consider XDG_DATA_HOME Usage: fmt.Sprintf("Specify miner repo path. flag(%s) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON", FlagMinerRepoDeprecation), }, - &cli.StringFlag{ - Name: FlagMarketsRepo, - EnvVars: []string{"LOTUS_MARKETS_PATH"}, - Hidden: true, - }, - &cli.BoolFlag{ - Name: "call-on-markets", - Usage: "(experimental; may be removed) call this command against a markets node; use only with common commands like net, auth, pprof, etc. whose target may be ambiguous", - Hidden: true, - }, cliutil.FlagVeryVerbose, }, - Commands: append(local, append(lcli.CommonCommands, &netCmd)...), - Before: func(c *cli.Context) error { - // this command is explicitly called on markets, inform - // common commands by overriding the repoType. 
- if c.Bool("call-on-markets") { - c.App.Metadata["repoType"] = repo.Markets - } - return nil - }, + Commands: append(local, lcli.CommonCommands...), After: func(c *cli.Context) error { if r := recover(); r != nil { // Generate report in LOTUS_PATH and re-raise panic - build.GeneratePanicReport(c.String("panic-reports"), c.String(FlagMinerRepo), c.App.Name) + build.GenerateMinerPanicReport(c.String("panic-reports"), c.String(FlagMinerRepo), c.App.Name) panic(r) } return nil @@ -193,11 +153,6 @@ func getActorAddress(ctx context.Context, cctx *cli.Context) (maddr address.Addr return maddr, nil } -func setHidden(cmd *cli.Command) *cli.Command { - cmd.Hidden = true - return cmd -} - func LMActorOrEnvGetter(cctx *cli.Context) (address.Address, error) { return getActorAddress(cctx.Context, cctx) } diff --git a/cmd/lotus-miner/market.go b/cmd/lotus-miner/market.go deleted file mode 100644 index 29eb662a78d..00000000000 --- a/cmd/lotus-miner/market.go +++ /dev/null @@ -1,1039 +0,0 @@ -package main - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "text/tabwriter" - "time" - - tm "github.com/buger/goterm" - "github.com/docker/go-units" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-cidutil/cidenc" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multibase" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - cborutil "github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" -) - -var CidBaseFlag = cli.StringFlag{ - Name: "cid-base", - Hidden: true, - Value: "base32", - Usage: "Multibase encoding used for version 1 CIDs in 
output.", - DefaultText: "base32", -} - -// GetCidEncoder returns an encoder using the `cid-base` flag if provided, or -// the default (Base32) encoder if not. -func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) { - val := cctx.String("cid-base") - - e := cidenc.Encoder{Base: multibase.MustNewEncoder(multibase.Base32)} - - if val != "" { - var err error - e.Base, err = multibase.EncoderByName(val) - if err != nil { - return e, err - } - } - - return e, nil -} - -var storageDealSelectionCmd = &cli.Command{ - Name: "selection", - Usage: "Configure acceptance criteria for storage deal proposals", - Subcommands: []*cli.Command{ - storageDealSelectionShowCmd, - storageDealSelectionResetCmd, - storageDealSelectionRejectCmd, - }, -} - -var storageDealSelectionShowCmd = &cli.Command{ - Name: "list", - Usage: "List storage deal proposal selection criteria", - Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - onlineOk, err := smapi.DealsConsiderOnlineStorageDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - offlineOk, err := smapi.DealsConsiderOfflineStorageDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - fmt.Printf("considering online storage deals: %t\n", onlineOk) - fmt.Printf("considering offline storage deals: %t\n", offlineOk) - - return nil - }, -} - -var storageDealSelectionResetCmd = &cli.Command{ - Name: "reset", - Usage: "Reset storage deal proposal selection criteria to default values", - Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - err = smapi.DealsSetConsiderOnlineStorageDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - err = smapi.DealsSetConsiderOfflineStorageDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - err = 
smapi.DealsSetConsiderVerifiedStorageDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - err = smapi.DealsSetConsiderUnverifiedStorageDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - return nil - }, -} - -var storageDealSelectionRejectCmd = &cli.Command{ - Name: "reject", - Usage: "Configure criteria which necessitate automatic rejection", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "online", - }, - &cli.BoolFlag{ - Name: "offline", - }, - &cli.BoolFlag{ - Name: "verified", - }, - &cli.BoolFlag{ - Name: "unverified", - }, - }, - Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - if cctx.Bool("online") { - err = smapi.DealsSetConsiderOnlineStorageDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - if cctx.Bool("offline") { - err = smapi.DealsSetConsiderOfflineStorageDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - if cctx.Bool("verified") { - err = smapi.DealsSetConsiderVerifiedStorageDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - if cctx.Bool("unverified") { - err = smapi.DealsSetConsiderUnverifiedStorageDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - return nil - }, -} - -var setAskCmd = &cli.Command{ - Name: "set-ask", - Usage: "Configure the miner's ask", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "price", - Usage: "Set the price of the ask for unverified deals (specified as FIL / GiB / Epoch) to `PRICE`.", - Required: true, - }, - &cli.StringFlag{ - Name: "verified-price", - Usage: "Set the price of the ask for verified deals (specified as FIL / GiB / Epoch) to `PRICE`", - Required: true, - }, - &cli.StringFlag{ - Name: "min-piece-size", - Usage: "Set minimum piece size (w/bit-padding, in bytes) in ask to `SIZE`", - DefaultText: "256B", - Value: "256B", - }, - &cli.StringFlag{ - 
Name: "max-piece-size", - Usage: "Set maximum piece size (w/bit-padding, in bytes) in ask to `SIZE`", - DefaultText: "miner sector size", - Value: "0", - }, - }, - Action: func(cctx *cli.Context) error { - ctx := lcli.DaemonContext(cctx) - - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - pri, err := types.ParseFIL(cctx.String("price")) - if err != nil { - return err - } - - vpri, err := types.ParseFIL(cctx.String("verified-price")) - if err != nil { - return err - } - - dur, err := time.ParseDuration("720h0m0s") - if err != nil { - return xerrors.Errorf("cannot parse duration: %w", err) - } - - qty := dur.Seconds() / float64(build.BlockDelaySecs) - - min, err := units.RAMInBytes(cctx.String("min-piece-size")) - if err != nil { - return xerrors.Errorf("cannot parse min-piece-size to quantity of bytes: %w", err) - } - - if min < 256 { - return xerrors.New("minimum piece size (w/bit-padding) is 256B") - } - - max, err := units.RAMInBytes(cctx.String("max-piece-size")) - if err != nil { - return xerrors.Errorf("cannot parse max-piece-size to quantity of bytes: %w", err) - } - - maddr, err := minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - ssize, err := minerApi.ActorSectorSize(ctx, maddr) - if err != nil { - return err - } - - smax := int64(ssize) - - if max == 0 { - max = smax - } - - if max > smax { - return xerrors.Errorf("max piece size (w/bit-padding) %s cannot exceed miner sector size %s", types.SizeStr(types.NewInt(uint64(max))), types.SizeStr(types.NewInt(uint64(smax)))) - } - - return marketsApi.MarketSetAsk(ctx, types.BigInt(pri), types.BigInt(vpri), abi.ChainEpoch(qty), abi.PaddedPieceSize(min), abi.PaddedPieceSize(max)) - }, -} - -var getAskCmd = &cli.Command{ - Name: "get-ask", - Usage: "Print the miner's ask", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) 
error { - ctx := lcli.DaemonContext(cctx) - - fnapi, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - - smapi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - sask, err := smapi.MarketGetAsk(ctx) - if err != nil { - return err - } - - var ask *storagemarket.StorageAsk - if sask != nil && sask.Ask != nil { - ask = sask.Ask - } - - w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - fmt.Fprintf(w, "Price per GiB/Epoch\tVerified\tMin. Piece Size (padded)\tMax. Piece Size (padded)\tExpiry (Epoch)\tExpiry (Appx. Rem. Time)\tSeq. No.\n") - if ask == nil { - fmt.Fprintf(w, "\n") - - return w.Flush() - } - - head, err := fnapi.ChainHead(ctx) - if err != nil { - return err - } - - dlt := ask.Expiry - head.Height() - rem := "" - if dlt > 0 { - rem = (time.Second * time.Duration(int64(dlt)*int64(build.BlockDelaySecs))).String() - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s\t%d\n", types.FIL(ask.Price), types.FIL(ask.VerifiedPrice), types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), ask.Expiry, rem, ask.SeqNo) - - return w.Flush() - }, -} - -var storageDealsCmd = &cli.Command{ - Name: "storage-deals", - Usage: "Manage storage deals and related configuration", - Subcommands: []*cli.Command{ - dealsImportDataCmd, - dealsListCmd, - storageDealSelectionCmd, - setAskCmd, - getAskCmd, - setBlocklistCmd, - getBlocklistCmd, - resetBlocklistCmd, - setSealDurationCmd, - dealsPendingPublish, - dealsRetryPublish, - }, -} - -var dealsImportDataCmd = &cli.Command{ - Name: "import-data", - Usage: "Manually import data for a deal", - ArgsUsage: " ", - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.DaemonContext(cctx) - - if cctx.NArg() != 2 { - return lcli.IncorrectNumArgs(cctx) - } - - propCid, err := cid.Decode(cctx.Args().Get(0)) 
- if err != nil { - return err - } - - fpath := cctx.Args().Get(1) - - return api.DealsImportData(ctx, propCid, fpath) - - }, -} - -var dealsListCmd = &cli.Command{ - Name: "list", - Usage: "List all deals for this miner", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "format", - Usage: "output format of data, supported: table, json", - Value: "table", - }, - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - }, - &cli.BoolFlag{ - Name: "watch", - Usage: "watch deal updates in real-time, rather than a one time list", - }, - }, - Action: func(cctx *cli.Context) error { - switch cctx.String("format") { - case "table": - return listDealsWithTable(cctx) - case "json": - return listDealsWithJSON(cctx) - } - - return fmt.Errorf("unknown format: %s; use `table` or `json`", cctx.String("format")) - }, -} - -func listDealsWithTable(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.DaemonContext(cctx) - - deals, err := api.MarketListIncompleteDeals(ctx) - if err != nil { - return err - } - - verbose := cctx.Bool("verbose") - watch := cctx.Bool("watch") - - if watch { - updates, err := api.MarketGetDealUpdates(ctx) - if err != nil { - return err - } - - for { - tm.Clear() - tm.MoveCursor(1, 1) - - err = outputStorageDealsTable(tm.Output, deals, verbose) - if err != nil { - return err - } - - tm.Flush() - - select { - case <-ctx.Done(): - return nil - case updated := <-updates: - var found bool - for i, existing := range deals { - if existing.ProposalCid.Equals(updated.ProposalCid) { - deals[i] = updated - found = true - break - } - } - if !found { - deals = append(deals, updated) - } - } - } - } - - return outputStorageDealsTable(os.Stdout, deals, verbose) -} - -func outputStorageDealsTable(out io.Writer, deals []storagemarket.MinerDeal, verbose bool) error { - sort.Slice(deals, func(i, j int) bool { - return 
deals[i].CreationTime.Time().Before(deals[j].CreationTime.Time()) - }) - - w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0) - - if verbose { - _, _ = fmt.Fprintf(w, "Creation\tVerified\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tTransferChannelID\tMessage\n") - } else { - _, _ = fmt.Fprintf(w, "ProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\n") - } - - for _, deal := range deals { - propcid := deal.ProposalCid.String() - if !verbose { - propcid = "..." + propcid[len(propcid)-8:] - } - - fil := types.FIL(types.BigMul(deal.Proposal.StoragePricePerEpoch, types.NewInt(uint64(deal.Proposal.Duration())))) - - if verbose { - _, _ = fmt.Fprintf(w, "%s\t%t\t", deal.CreationTime.Time().Format(time.Stamp), deal.Proposal.VerifiedDeal) - } - - _, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s", propcid, deal.DealID, storagemarket.DealStates[deal.State], deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize)), fil, deal.Proposal.Duration()) - if verbose { - tchid := "" - if deal.TransferChannelId != nil { - tchid = deal.TransferChannelId.String() - } - _, _ = fmt.Fprintf(w, "\t%s", tchid) - _, _ = fmt.Fprintf(w, "\t%s", deal.Message) - } - - _, _ = fmt.Fprintln(w) - } - - return w.Flush() -} - -var getBlocklistCmd = &cli.Command{ - Name: "get-blocklist", - Usage: "List the contents of the miner's piece CID blocklist", - Flags: []cli.Flag{ - &CidBaseFlag, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - blocklist, err := api.DealsPieceCidBlocklist(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - encoder, err := GetCidEncoder(cctx) - if err != nil { - return err - } - - for idx := range blocklist { - fmt.Println(encoder.Encode(blocklist[idx])) - } - - return nil - }, -} - -var setBlocklistCmd = &cli.Command{ - Name: "set-blocklist", - Usage: "Set the miner's list of blocklisted piece CIDs", - ArgsUsage: "[ (optional, will 
read from stdin if omitted)]", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - scanner := bufio.NewScanner(os.Stdin) - if cctx.Args().Present() && cctx.Args().First() != "-" { - absPath, err := filepath.Abs(cctx.Args().First()) - if err != nil { - return err - } - - file, err := os.Open(absPath) - if err != nil { - log.Fatal(err) - } - defer file.Close() //nolint:errcheck - - scanner = bufio.NewScanner(file) - } - - var blocklist []cid.Cid - for scanner.Scan() { - decoded, err := cid.Decode(scanner.Text()) - if err != nil { - return err - } - - blocklist = append(blocklist, decoded) - } - - err = scanner.Err() - if err != nil { - return err - } - - return api.DealsSetPieceCidBlocklist(lcli.DaemonContext(cctx), blocklist) - }, -} - -var resetBlocklistCmd = &cli.Command{ - Name: "reset-blocklist", - Usage: "Remove all entries from the miner's piece CID blocklist", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - return api.DealsSetPieceCidBlocklist(lcli.DaemonContext(cctx), []cid.Cid{}) - }, -} - -var setSealDurationCmd = &cli.Command{ - Name: "set-seal-duration", - Usage: "Set the expected time, in minutes, that you expect sealing sectors to take. 
Deals that start before this duration will be rejected.", - ArgsUsage: "", - Action: func(cctx *cli.Context) error { - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - hs, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) - if err != nil { - return xerrors.Errorf("could not parse duration: %w", err) - } - - delay := hs * uint64(time.Minute) - - return nodeApi.SectorSetExpectedSealDuration(ctx, time.Duration(delay)) - }, -} - -var dataTransfersCmd = &cli.Command{ - Name: "data-transfers", - Usage: "Manage data transfers", - Subcommands: []*cli.Command{ - transfersListCmd, - marketRestartTransfer, - marketCancelTransfer, - transfersDiagnosticsCmd, - }, -} - -var marketRestartTransfer = &cli.Command{ - Name: "restart", - Usage: "Force restart a stalled data transfer", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "peerid", - Usage: "narrow to transfer with specific peer", - }, - &cli.BoolFlag{ - Name: "initiator", - Usage: "specify only transfers where peer is/is not initiator", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) - if err != nil { - return fmt.Errorf("Error reading transfer ID: %w", err) - } - transferID := datatransfer.TransferID(transferUint) - initiator := cctx.Bool("initiator") - var other peer.ID - if pidstr := cctx.String("peerid"); pidstr != "" { - p, err := peer.Decode(pidstr) - if err != nil { - return err - } - other = p - } else { - channels, err := nodeApi.MarketListDataTransfers(ctx) - if err != nil { - return err - } - found := false - for _, channel := range channels { - if 
channel.IsInitiator == initiator && channel.TransferID == transferID { - other = channel.OtherPeer - found = true - break - } - } - if !found { - return errors.New("unable to find matching data transfer") - } - } - - return nodeApi.MarketRestartDataTransfer(ctx, transferID, other, initiator) - }, -} - -var marketCancelTransfer = &cli.Command{ - Name: "cancel", - Usage: "Force cancel a data transfer", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "peerid", - Usage: "narrow to transfer with specific peer", - }, - &cli.BoolFlag{ - Name: "initiator", - Usage: "specify only transfers where peer is/is not initiator", - Value: false, - }, - &cli.DurationFlag{ - Name: "cancel-timeout", - Usage: "time to wait for cancel to be sent to client", - Value: 5 * time.Second, - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64) - if err != nil { - return fmt.Errorf("Error reading transfer ID: %w", err) - } - transferID := datatransfer.TransferID(transferUint) - initiator := cctx.Bool("initiator") - var other peer.ID - if pidstr := cctx.String("peerid"); pidstr != "" { - p, err := peer.Decode(pidstr) - if err != nil { - return err - } - other = p - } else { - channels, err := nodeApi.MarketListDataTransfers(ctx) - if err != nil { - return err - } - found := false - for _, channel := range channels { - if channel.IsInitiator == initiator && channel.TransferID == transferID { - other = channel.OtherPeer - found = true - break - } - } - if !found { - return errors.New("unable to find matching data transfer") - } - } - - timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout")) - defer cancel() - return nodeApi.MarketCancelDataTransfer(timeoutCtx, transferID, other, 
initiator) - }, -} - -var transfersListCmd = &cli.Command{ - Name: "list", - Usage: "List ongoing data transfers for this miner", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - Usage: "print verbose transfer details", - }, - &cli.BoolFlag{ - Name: "completed", - Usage: "show completed data transfers", - }, - &cli.BoolFlag{ - Name: "watch", - Usage: "watch deal updates in real-time, rather than a one time list", - }, - &cli.BoolFlag{ - Name: "show-failed", - Usage: "show failed/cancelled transfers", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - channels, err := api.MarketListDataTransfers(ctx) - if err != nil { - return err - } - - verbose := cctx.Bool("verbose") - completed := cctx.Bool("completed") - watch := cctx.Bool("watch") - showFailed := cctx.Bool("show-failed") - if watch { - channelUpdates, err := api.MarketDataTransferUpdates(ctx) - if err != nil { - return err - } - - for { - tm.Clear() // Clear current screen - - tm.MoveCursor(1, 1) - - lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) - - tm.Flush() - - select { - case <-ctx.Done(): - return nil - case channelUpdate := <-channelUpdates: - var found bool - for i, existing := range channels { - if existing.TransferID == channelUpdate.TransferID && - existing.OtherPeer == channelUpdate.OtherPeer && - existing.IsSender == channelUpdate.IsSender && - existing.IsInitiator == channelUpdate.IsInitiator { - channels[i] = channelUpdate - found = true - break - } - } - if !found { - channels = append(channels, channelUpdate) - } - } - } - } - lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) - return nil - }, -} - -var transfersDiagnosticsCmd = &cli.Command{ - Name: "diagnostics", - Usage: "Get detailed diagnostics on active transfers with a specific peer", - Flags: 
[]cli.Flag{}, - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - targetPeer, err := peer.Decode(cctx.Args().First()) - if err != nil { - return err - } - diagnostics, err := api.MarketDataTransferDiagnostics(ctx, targetPeer) - if err != nil { - return err - } - out, err := json.MarshalIndent(diagnostics, "", "\t") - if err != nil { - return err - } - fmt.Println(string(out)) - return nil - }, -} - -var dealsPendingPublish = &cli.Command{ - Name: "pending-publish", - Usage: "list deals waiting in publish queue", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "publish-now", - Usage: "send a publish message now", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.Bool("publish-now") { - if err := api.MarketPublishPendingDeals(ctx); err != nil { - return xerrors.Errorf("publishing deals: %w", err) - } - fmt.Println("triggered deal publishing") - return nil - } - - pending, err := api.MarketPendingDeals(ctx) - if err != nil { - return xerrors.Errorf("getting pending deals: %w", err) - } - - if len(pending.Deals) > 0 { - endsIn := pending.PublishPeriodStart.Add(pending.PublishPeriod).Sub(time.Now()) - w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - _, _ = fmt.Fprintf(w, "Publish period: %s (ends in %s)\n", pending.PublishPeriod, endsIn.Round(time.Second)) - _, _ = fmt.Fprintf(w, "First deal queued at: %s\n", pending.PublishPeriodStart) - _, _ = fmt.Fprintf(w, "Deals will be published at: %s\n", pending.PublishPeriodStart.Add(pending.PublishPeriod)) - _, _ = fmt.Fprintf(w, "%d deals queued to be published:\n", len(pending.Deals)) - _, _ = fmt.Fprintf(w, "ProposalCID\tClient\tSize\n") - for _, deal := range 
pending.Deals { - proposalNd, err := cborutil.AsIpld(&deal) // nolint - if err != nil { - return err - } - - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", proposalNd.Cid(), deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize))) - } - return w.Flush() - } - - fmt.Println("No deals queued to be published") - return nil - }, -} - -var dealsRetryPublish = &cli.Command{ - Name: "retry-publish", - Usage: "retry publishing a deal", - ArgsUsage: "", - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - propcid := cctx.Args().First() - fmt.Printf("retrying deal with proposal-cid: %s\n", propcid) - - cid, err := cid.Decode(propcid) - if err != nil { - return err - } - if err := api.MarketRetryPublishDeal(ctx, cid); err != nil { - return xerrors.Errorf("retrying publishing deal: %w", err) - } - fmt.Println("retried to publish deal") - return nil - }, -} - -func listDealsWithJSON(cctx *cli.Context) error { - node, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.DaemonContext(cctx) - - deals, err := node.MarketListIncompleteDeals(ctx) - if err != nil { - return err - } - - channels, err := node.MarketListDataTransfers(ctx) - if err != nil { - return err - } - - sort.Slice(deals, func(i, j int) bool { - return deals[i].CreationTime.Time().Before(deals[j].CreationTime.Time()) - }) - - channelsByTransferID := map[datatransfer.TransferID]api.DataTransferChannel{} - for _, c := range channels { - channelsByTransferID[c.TransferID] = c - } - - w := json.NewEncoder(os.Stdout) - - for _, deal := range deals { - val := struct { - DateTime string `json:"datetime"` - VerifiedDeal bool `json:"verified-deal"` - ProposalCID string `json:"proposal-cid"` - DealID abi.DealID `json:"deal-id"` - DealStatus string 
`json:"deal-status"` - Client string `json:"client"` - PieceSize string `json:"piece-size"` - Price types.FIL `json:"price"` - DurationEpochs abi.ChainEpoch `json:"duration-epochs"` - TransferID *datatransfer.TransferID `json:"transfer-id,omitempty"` - TransferStatus string `json:"transfer-status,omitempty"` - TransferredData string `json:"transferred-data,omitempty"` - }{} - - val.DateTime = deal.CreationTime.Time().Format(time.RFC3339) - val.VerifiedDeal = deal.Proposal.VerifiedDeal - val.ProposalCID = deal.ProposalCid.String() - val.DealID = deal.DealID - val.DealStatus = storagemarket.DealStates[deal.State] - val.Client = deal.Proposal.Client.String() - val.PieceSize = units.BytesSize(float64(deal.Proposal.PieceSize)) - val.Price = types.FIL(types.BigMul(deal.Proposal.StoragePricePerEpoch, types.NewInt(uint64(deal.Proposal.Duration())))) - val.DurationEpochs = deal.Proposal.Duration() - - if deal.TransferChannelId != nil { - if c, ok := channelsByTransferID[deal.TransferChannelId.ID]; ok { - val.TransferID = &c.TransferID - val.TransferStatus = datatransfer.Statuses[c.Status] - val.TransferredData = units.BytesSize(float64(c.Transferred)) - } - } - - err := w.Encode(val) - if err != nil { - return err - } - } - - return nil -} diff --git a/cmd/lotus-miner/pieces.go b/cmd/lotus-miner/pieces.go deleted file mode 100644 index a64142237c2..00000000000 --- a/cmd/lotus-miner/pieces.go +++ /dev/null @@ -1,193 +0,0 @@ -package main - -import ( - "fmt" - "os" - "text/tabwriter" - - "github.com/ipfs/go-cid" - "github.com/urfave/cli/v2" - - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/tablewriter" -) - -var piecesCmd = &cli.Command{ - Name: "pieces", - Usage: "interact with the piecestore", - Description: "The piecestore is a database that tracks and manages data that is made available to the retrieval market", - Subcommands: []*cli.Command{ - piecesListPiecesCmd, - piecesListCidInfosCmd, - piecesInfoCmd, - piecesCidInfoCmd, - }, 
-} - -var piecesListPiecesCmd = &cli.Command{ - Name: "list-pieces", - Usage: "list registered pieces", - Action: func(cctx *cli.Context) error { - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - pieceCids, err := nodeApi.PiecesListPieces(ctx) - if err != nil { - return err - } - - for _, pc := range pieceCids { - fmt.Println(pc) - } - return nil - }, -} - -var piecesListCidInfosCmd = &cli.Command{ - Name: "list-cids", - Usage: "list registered payload CIDs", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - }, - }, - Action: func(cctx *cli.Context) error { - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - cids, err := nodeApi.PiecesListCidInfos(ctx) - if err != nil { - return err - } - - w := tablewriter.New(tablewriter.Col("CID"), - tablewriter.Col("Piece"), - tablewriter.Col("BlockOffset"), - tablewriter.Col("BlockLen"), - tablewriter.Col("Deal"), - tablewriter.Col("Sector"), - tablewriter.Col("DealOffset"), - tablewriter.Col("DealLen"), - ) - - for _, c := range cids { - if !cctx.Bool("verbose") { - fmt.Println(c) - continue - } - - ci, err := nodeApi.PiecesGetCIDInfo(ctx, c) - if err != nil { - fmt.Printf("Error getting CID info: %s\n", err) - continue - } - - for _, location := range ci.PieceBlockLocations { - pi, err := nodeApi.PiecesGetPieceInfo(ctx, location.PieceCID) - if err != nil { - fmt.Printf("Error getting piece info: %s\n", err) - continue - } - - for _, deal := range pi.Deals { - w.Write(map[string]interface{}{ - "CID": c, - "Piece": location.PieceCID, - "BlockOffset": location.RelOffset, - "BlockLen": location.BlockSize, - "Deal": deal.DealID, - "Sector": deal.SectorID, - "DealOffset": deal.Offset, - "DealLen": deal.Length, - }) - } - } - } - - if cctx.Bool("verbose") { - return w.Flush(os.Stdout) - } - - return nil - }, -} - -var 
piecesInfoCmd = &cli.Command{ - Name: "piece-info", - Usage: "get registered information for a given piece CID", - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return lcli.ShowHelp(cctx, fmt.Errorf("must specify piece cid")) - } - - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - c, err := cid.Decode(cctx.Args().First()) - if err != nil { - return err - } - - pi, err := nodeApi.PiecesGetPieceInfo(ctx, c) - if err != nil { - return err - } - - fmt.Println("Piece: ", pi.PieceCID) - w := tabwriter.NewWriter(os.Stdout, 4, 4, 2, ' ', 0) - fmt.Fprintln(w, "Deals:\nDealID\tSectorID\tLength\tOffset") - for _, d := range pi.Deals { - fmt.Fprintf(w, "%d\t%d\t%d\t%d\n", d.DealID, d.SectorID, d.Length, d.Offset) - } - return w.Flush() - }, -} - -var piecesCidInfoCmd = &cli.Command{ - Name: "cid-info", - Usage: "get registered information for a given payload CID", - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return lcli.ShowHelp(cctx, fmt.Errorf("must specify payload cid")) - } - - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - c, err := cid.Decode(cctx.Args().First()) - if err != nil { - return err - } - - ci, err := nodeApi.PiecesGetCIDInfo(ctx, c) - if err != nil { - return err - } - - fmt.Println("Info for: ", ci.CID) - - w := tabwriter.NewWriter(os.Stdout, 4, 4, 2, ' ', 0) - fmt.Fprintf(w, "PieceCid\tOffset\tSize\n") - for _, loc := range ci.PieceBlockLocations { - fmt.Fprintf(w, "%s\t%d\t%d\n", loc.PieceCID, loc.RelOffset, loc.BlockSize) - } - return w.Flush() - }, -} diff --git a/cmd/lotus-miner/retrieval-deals.go b/cmd/lotus-miner/retrieval-deals.go deleted file mode 100644 index 42b0fa1f6f8..00000000000 --- a/cmd/lotus-miner/retrieval-deals.go +++ /dev/null @@ -1,231 +0,0 @@ -package main - -import ( - "fmt" - "os" - "text/tabwriter" - - 
"github.com/docker/go-units" - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" -) - -var retrievalDealsCmd = &cli.Command{ - Name: "retrieval-deals", - Usage: "Manage retrieval deals and related configuration", - Subcommands: []*cli.Command{ - retrievalDealSelectionCmd, - retrievalSetAskCmd, - retrievalGetAskCmd, - }, -} - -var retrievalDealSelectionCmd = &cli.Command{ - Name: "selection", - Usage: "Configure acceptance criteria for retrieval deal proposals", - Subcommands: []*cli.Command{ - retrievalDealSelectionShowCmd, - retrievalDealSelectionResetCmd, - retrievalDealSelectionRejectCmd, - }, -} - -var retrievalDealSelectionShowCmd = &cli.Command{ - Name: "list", - Usage: "List retrieval deal proposal selection criteria", - Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - onlineOk, err := smapi.DealsConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - offlineOk, err := smapi.DealsConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - fmt.Printf("considering online retrieval deals: %t\n", onlineOk) - fmt.Printf("considering offline retrieval deals: %t\n", offlineOk) - - return nil - }, -} - -var retrievalDealSelectionResetCmd = &cli.Command{ - Name: "reset", - Usage: "Reset retrieval deal proposal selection criteria to default values", - Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), true) - if err != nil { - return err - } - - return nil - }, -} - -var 
retrievalDealSelectionRejectCmd = &cli.Command{ - Name: "reject", - Usage: "Configure criteria which necessitate automatic rejection", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "online", - }, - &cli.BoolFlag{ - Name: "offline", - }, - }, - Action: func(cctx *cli.Context) error { - smapi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - if cctx.Bool("online") { - err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - if cctx.Bool("offline") { - err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), false) - if err != nil { - return err - } - } - - return nil - }, -} - -var retrievalSetAskCmd = &cli.Command{ - Name: "set-ask", - Usage: "Configure the provider's retrieval ask", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "price", - Usage: "Set the price of the ask for retrievals (FIL/GiB)", - }, - &cli.StringFlag{ - Name: "unseal-price", - Usage: "Set the price to unseal", - }, - &cli.StringFlag{ - Name: "payment-interval", - Usage: "Set the payment interval (in bytes) for retrieval", - DefaultText: "1MiB", - }, - &cli.StringFlag{ - Name: "payment-interval-increase", - Usage: "Set the payment interval increase (in bytes) for retrieval", - DefaultText: "1MiB", - }, - }, - Action: func(cctx *cli.Context) error { - ctx := lcli.DaemonContext(cctx) - - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ask, err := api.MarketGetRetrievalAsk(ctx) - if err != nil { - return err - } - - if cctx.IsSet("price") { - v, err := types.ParseFIL(cctx.String("price")) - if err != nil { - return err - } - ask.PricePerByte = types.BigDiv(types.BigInt(v), types.NewInt(1<<30)) - } - - if cctx.IsSet("unseal-price") { - v, err := types.ParseFIL(cctx.String("unseal-price")) - if err != nil { - return err - } - ask.UnsealPrice = abi.TokenAmount(v) - } - - if cctx.IsSet("payment-interval") { - v, err 
:= units.RAMInBytes(cctx.String("payment-interval")) - if err != nil { - return err - } - ask.PaymentInterval = uint64(v) - } - - if cctx.IsSet("payment-interval-increase") { - v, err := units.RAMInBytes(cctx.String("payment-interval-increase")) - if err != nil { - return err - } - ask.PaymentIntervalIncrease = uint64(v) - } - - return api.MarketSetRetrievalAsk(ctx, ask) - }, -} - -var retrievalGetAskCmd = &cli.Command{ - Name: "get-ask", - Usage: "Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - ctx := lcli.DaemonContext(cctx) - - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - ask, err := api.MarketGetRetrievalAsk(ctx) - if err != nil { - return err - } - - w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - _, _ = fmt.Fprintf(w, "Price per Byte\tUnseal Price\tPayment Interval\tPayment Interval Increase\n") - if ask == nil { - _, _ = fmt.Fprintf(w, "\n") - return w.Flush() - } - - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", - types.FIL(ask.PricePerByte), - types.FIL(ask.UnsealPrice), - units.BytesSize(float64(ask.PaymentInterval)), - units.BytesSize(float64(ask.PaymentIntervalIncrease)), - ) - return w.Flush() - - }, -} diff --git a/cmd/lotus-miner/run.go b/cmd/lotus-miner/run.go index 93dfea2fc4d..e09968165b9 100644 --- a/cmd/lotus-miner/run.go +++ b/cmd/lotus-miner/run.go @@ -20,7 +20,6 @@ import ( "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" ) @@ -57,7 +56,7 @@ var runCmd = &cli.Command{ } ctx, _ := tag.New(lcli.DaemonContext(cctx), - tag.Insert(metrics.Version, build.BuildVersion), + tag.Insert(metrics.Version, build.MinerBuildVersion), 
tag.Insert(metrics.Commit, build.CurrentCommit), tag.Insert(metrics.NodeType, "miner"), ) @@ -121,16 +120,6 @@ var runCmd = &cli.Command{ if err != nil { return err } - c, err := lr.Config() - if err != nil { - return err - } - cfg, ok := c.(*config.StorageMiner) - if !ok { - return xerrors.Errorf("invalid config for repo, got: %T", c) - } - - bootstrapLibP2P := cfg.Subsystems.EnableMarkets err = lr.Close() if err != nil { @@ -141,7 +130,7 @@ var runCmd = &cli.Command{ var minerapi api.StorageMiner stop, err := node.New(ctx, - node.StorageMiner(&minerapi, cfg.Subsystems), + node.StorageMiner(&minerapi), node.Override(new(dtypes.ShutdownChan), shutdownChan), node.Base(), node.Repo(r), @@ -161,20 +150,6 @@ var runCmd = &cli.Command{ return xerrors.Errorf("getting API endpoint: %w", err) } - if bootstrapLibP2P { - log.Infof("Bootstrapping libp2p network with full node") - - // Bootstrap with full node - remoteAddrs, err := nodeApi.NetAddrsListen(ctx) - if err != nil { - return xerrors.Errorf("getting full node libp2p address: %w", err) - } - - if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { - return xerrors.Errorf("connecting to full node (libp2p): %w", err) - } - } - log.Infof("Remote version %s", v) // Instantiate the miner node handler. 
diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index cf32f424895..c3217a1a729 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -588,7 +588,7 @@ var sectorsRefsCmd = &cli.Command{ Name: "refs", Usage: "List References to sectors", Action: func(cctx *cli.Context) error { - nodeApi, closer, err := lcli.GetMarketsAPI(cctx) + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err } diff --git a/cmd/lotus-pcr/main.go b/cmd/lotus-pcr/main.go index 199810e03cd..a4a246f33c7 100644 --- a/cmd/lotus-pcr/main.go +++ b/cmd/lotus-pcr/main.go @@ -70,7 +70,7 @@ func main() { A single message will be produced per miner totaling their refund for all PreCommitSector messages in a tipset. `, - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Flags: []cli.Flag{ &cli.StringFlag{ Name: "lotus-path", diff --git a/cmd/lotus-seed/main.go b/cmd/lotus-seed/main.go index 9deae560eb9..dd70cce2177 100644 --- a/cmd/lotus-seed/main.go +++ b/cmd/lotus-seed/main.go @@ -38,7 +38,7 @@ func main() { app := &cli.App{ Name: "lotus-seed", Usage: "Seal sectors for genesis miner", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Flags: []cli.Flag{ &cli.StringFlag{ Name: "sector-dir", diff --git a/cmd/lotus-shed/actor.go b/cmd/lotus-shed/actor.go index 2acf89076bc..c7eb949c22f 100644 --- a/cmd/lotus-shed/actor.go +++ b/cmd/lotus-shed/actor.go @@ -719,12 +719,12 @@ var actorControlSet = &cli.Command{ return err } - fmt.Fprintln(cctx.App.Writer, hex.EncodeToString(msgBytes)) + _, _ = fmt.Fprintln(cctx.App.Writer, hex.EncodeToString(msgBytes)) return nil } if !cctx.Bool("really-do-it") { - fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + _, _ = fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") return nil } @@ -760,7 +760,7 @@ var actorProposeChangeWorker = &cli.Command{ } if !cctx.Bool("really-do-it") { - 
fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + _, _ = fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") return nil } @@ -840,7 +840,7 @@ var actorProposeChangeWorker = &cli.Command{ return xerrors.Errorf("mpool push: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) // wait for it to get mined into a block wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) @@ -850,7 +850,7 @@ var actorProposeChangeWorker = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Propose worker change failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose worker change failed!") return err } @@ -862,8 +862,8 @@ var actorProposeChangeWorker = &cli.Command{ return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) } - fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) - fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + _, _ = fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) + _, _ = fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) return nil }, @@ -890,7 +890,7 @@ var actorConfirmChangeWorker = &cli.Command{ } if !cctx.Bool("really-do-it") { - fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + _, _ = fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") return nil } @@ -961,7 +961,7 @@ var actorConfirmChangeWorker = &cli.Command{ return xerrors.Errorf("mpool push: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Confirm Message CID:", smsg.Cid()) + _, _ = fmt.Fprintln(cctx.App.Writer, 
"Confirm Message CID:", smsg.Cid()) // wait for it to get mined into a block wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) @@ -971,7 +971,7 @@ var actorConfirmChangeWorker = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Worker change failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Worker change failed!") return err } diff --git a/cmd/lotus-shed/deal.go b/cmd/lotus-shed/deal.go deleted file mode 100644 index 3a350976e9b..00000000000 --- a/cmd/lotus-shed/deal.go +++ /dev/null @@ -1,284 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "os" - - "github.com/fatih/color" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - commcid "github.com/filecoin-project/go-fil-commcid" - commp "github.com/filecoin-project/go-fil-commp-hashhash" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/market" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/must" -) - -var lpUtilCmd = &cli.Command{ - Name: "curio-util", - Usage: "curio utility commands", - Subcommands: []*cli.Command{ - lpUtilStartDealCmd, - }, -} - -var lpUtilStartDealCmd = &cli.Command{ - Name: "start-deal", - Usage: "start a deal with a specific curio instance", - ArgsUsage: "[dataFile] [miner]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "curio-rpc", - Value: "http://127.0.0.1:12300", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.Args().Len() != 2 { - return 
xerrors.Errorf("expected 2 arguments") - } - - maddr, err := address.NewFromString(cctx.Args().Get(1)) - if err != nil { - return xerrors.Errorf("parse miner address: %w", err) - } - - full, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - defAddr, err := full.WalletDefaultAddress(ctx) - if err != nil { - return xerrors.Errorf("get default address: %w", err) - } - - // open rpc - var rpc api.CurioStruct - closer2, err := jsonrpc.NewMergeClient(ctx, cctx.String("curio-rpc"), "Filecoin", []interface{}{&rpc.Internal}, nil) - if err != nil { - return xerrors.Errorf("open rpc: %w", err) - } - defer closer2() - - v, err := rpc.Version(ctx) - if err != nil { - return xerrors.Errorf("rpc version: %w", err) - } - - fmt.Printf("* curio version: %s\n", v.String()) - - // open data file - data, err := homedir.Expand(cctx.Args().Get(0)) - if err != nil { - return xerrors.Errorf("get data file: %w", err) - } - - df, err := os.Open(data) - if err != nil { - return xerrors.Errorf("open data file: %w", err) - } - - dstat, err := df.Stat() - if err != nil { - return xerrors.Errorf("stat data file: %w", err) - } - - // compute commd - color.Green("> computing piece CID\n") - - writer := new(commp.Calc) - _, err = io.Copy(writer, df) - if err != nil { - return xerrors.Errorf("compute commd copy: %w", err) - } - - commp, pps, err := writer.Digest() - if err != nil { - return xerrors.Errorf("compute commd: %w", err) - } - - pieceCid, err := commcid.PieceCommitmentV1ToCID(commp) - if err != nil { - return xerrors.Errorf("make pieceCid: %w", err) - } - - fmt.Printf("* piece CID: %s\n", pieceCid) - fmt.Printf("* piece size: %d\n", pps) - - // start serving the file - color.Green("> starting temp http server\n") - - deleteCalled := make(chan struct{}) - - mux := http.NewServeMux() - mux.HandleFunc("/"+pieceCid.String(), func(w http.ResponseWriter, r *http.Request) { - // log request and method - color.Blue("< 
%s %s\n", r.Method, r.URL) - - if r.Method == http.MethodDelete { - close(deleteCalled) - return - } - - http.ServeFile(w, r, data) - }) - - ts := httptest.NewServer(mux) - - dataUrl, err := url.Parse(ts.URL) - if err != nil { - return xerrors.Errorf("parse data url: %w", err) - } - dataUrl.Path = "/" + pieceCid.String() - - fmt.Printf("* data url: %s\n", dataUrl) - - // publish the deal - color.Green("> publishing deal\n") - - head, err := full.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("get chain head: %w", err) - } - - verif := false - - bds, err := full.StateDealProviderCollateralBounds(ctx, abi.PaddedPieceSize(pps), verif, head.Key()) - if err != nil { - return xerrors.Errorf("get provider collateral bounds: %w", err) - } - - pcoll := big.Mul(bds.Min, big.NewInt(2)) - - dealProposal := market.DealProposal{ - PieceCID: pieceCid, - PieceSize: abi.PaddedPieceSize(pps), - VerifiedDeal: verif, - Client: defAddr, - Provider: maddr, - Label: must.One(market.NewLabelFromString("lotus-shed-made-this")), - StartEpoch: head.Height() + 2000, - EndEpoch: head.Height() + 2880*300, - StoragePricePerEpoch: big.Zero(), - ProviderCollateral: pcoll, - ClientCollateral: big.Zero(), - } - - pbuf, err := cborutil.Dump(&dealProposal) - if err != nil { - return xerrors.Errorf("dump deal proposal: %w", err) - } - - sig, err := full.WalletSign(ctx, defAddr, pbuf) - if err != nil { - return xerrors.Errorf("sign deal proposal: %w", err) - } - - params := market.PublishStorageDealsParams{ - Deals: []market.ClientDealProposal{ - { - Proposal: dealProposal, - ClientSignature: *sig, - }, - }, - } - - var buf bytes.Buffer - err = params.MarshalCBOR(&buf) - if err != nil { - return xerrors.Errorf("marshal params: %w", err) - } - - msg := &types.Message{ - To: builtin.StorageMarketActorAddr, - From: defAddr, - Method: builtin.MethodsMarket.PublishStorageDeals, - Params: buf.Bytes(), - } - - smsg, err := full.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return 
xerrors.Errorf("push message: %w", err) - } - - fmt.Printf("* PSD message cid: %s\n", smsg.Cid()) - - // wait for deal to be published - color.Green("> waiting for PublishStorageDeals to land on chain\n") - - rcpt, err := full.StateWaitMsg(ctx, smsg.Cid(), 3) - if err != nil { - return xerrors.Errorf("wait message: %w", err) - } - - if rcpt.Receipt.ExitCode != 0 { - return xerrors.Errorf("publish deal failed: exit code %d", rcpt.Receipt.ExitCode) - } - - // parse results - var ret market.PublishStorageDealsReturn - err = ret.UnmarshalCBOR(bytes.NewReader(rcpt.Receipt.Return)) - if err != nil { - return xerrors.Errorf("unmarshal return: %w", err) - } - - if len(ret.IDs) != 1 { - return xerrors.Errorf("expected 1 deal id, got %d", len(ret.IDs)) - } - - dealId := ret.IDs[0] - - fmt.Printf("* deal id: %d\n", dealId) - - // start deal - color.Green("> starting deal\n") - - pcid := smsg.Cid() - - pdi := api.PieceDealInfo{ - PublishCid: &pcid, - DealID: dealId, - DealProposal: &dealProposal, - DealSchedule: api.DealSchedule{ - StartEpoch: dealProposal.StartEpoch, - EndEpoch: dealProposal.EndEpoch, - }, - KeepUnsealed: true, - } - - soff, err := rpc.AllocatePieceToSector(ctx, maddr, pdi, dstat.Size(), *dataUrl, nil) - if err != nil { - return xerrors.Errorf("allocate piece to sector: %w", err) - } - - fmt.Printf("* sector offset: %d\n", soff) - - // wait for delete call on the file - color.Green("> waiting for file to be deleted (on sector finalize)\n") - - <-deleteCalled - - fmt.Println("* done") - - return nil - }, -} diff --git a/cmd/lotus-shed/eth.go b/cmd/lotus-shed/eth.go index fde4f96f68f..46bd1b43c29 100644 --- a/cmd/lotus-shed/eth.go +++ b/cmd/lotus-shed/eth.go @@ -88,16 +88,16 @@ var computeEthHashCmd = &cli.Command{ switch msg := msg.(type) { case *types.SignedMessage: - tx, err := ethtypes.EthTxFromSignedEthMessage(msg) + tx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) if err != nil { return fmt.Errorf("failed to convert from signed message: 
%w", err) } - tx.Hash, err = tx.TxHash() + hash, err := tx.TxHash() if err != nil { return fmt.Errorf("failed to call TxHash: %w", err) } - fmt.Println(tx.Hash) + fmt.Println(hash) default: return fmt.Errorf("not a signed message") } diff --git a/cmd/lotus-shed/indexes.go b/cmd/lotus-shed/indexes.go index 12ebe0082b5..334b7f10453 100644 --- a/cmd/lotus-shed/indexes.go +++ b/cmd/lotus-shed/indexes.go @@ -7,6 +7,7 @@ import ( "path" "path/filepath" "strings" + "time" "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" @@ -23,6 +24,13 @@ import ( lcli "github.com/filecoin-project/lotus/cli" ) +const ( + // same as in chain/events/index.go + eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?` + insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)` + insertEntry = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)` +) + func withCategory(cat string, cmd *cli.Command) *cli.Command { cmd.Category = strings.ToUpper(cat) return cmd @@ -54,6 +62,16 @@ var backfillEventsCmd = &cli.Command{ Value: 2000, Usage: "the number of epochs to backfill", }, + &cli.BoolFlag{ + Name: "temporary-index", + Value: false, + Usage: "use a temporary index to speed up the backfill process", + }, + &cli.BoolFlag{ + Name: "vacuum", + Value: false, + Usage: "run VACUUM on the database after backfilling is complete; this will reclaim space from deleted rows, but may take a long time", + }, }, Action: func(cctx *cli.Context) error { srv, err := lcli.GetFullNodeServices(cctx) @@ -92,8 +110,12 @@ var backfillEventsCmd = &cli.Command{ return err } + log.Infof( + "WARNING: If this command is run against a node that is currently collecting events with DisableHistoricFilterAPI=false, " + + "it may cause the 
node to fail to record recent events due to the need to obtain an exclusive lock on the database for writes.") + dbPath := path.Join(basePath, "sqlite", "events.db") - db, err := sql.Open("sqlite3", dbPath) + db, err := sql.Open("sqlite3", dbPath+"?_txlock=immediate") if err != nil { return err } @@ -105,6 +127,14 @@ var backfillEventsCmd = &cli.Command{ } }() + if cctx.Bool("temporary-index") { + log.Info("creating temporary index (tmp_event_backfill_index) on event table to speed up backfill") + _, err := db.Exec("CREATE INDEX IF NOT EXISTS tmp_event_backfill_index ON event (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted);") + if err != nil { + return err + } + } + addressLookups := make(map[abi.ActorID]address.Address) // TODO: We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands @@ -133,26 +163,36 @@ var backfillEventsCmd = &cli.Command{ var totalEventsAffected int64 var totalEntriesAffected int64 + stmtEventExists, err := db.Prepare(eventExists) + if err != nil { + return err + } + stmtInsertEvent, err := db.Prepare(insertEvent) + if err != nil { + return err + } + stmtInsertEntry, err := db.Prepare(insertEntry) + if err != nil { + return err + } + processHeight := func(ctx context.Context, cnt int, msgs []lapi.Message, receipts []*types.MessageReceipt) error { - tx, err := db.BeginTx(ctx, nil) - if err != nil { - return fmt.Errorf("failed to start transaction: %w", err) + var tx *sql.Tx + for { + var err error + tx, err = db.BeginTx(ctx, nil) + if err != nil { + if err.Error() == "database is locked" { + log.Warnf("database is locked, retrying in 200ms") + time.Sleep(200 * time.Millisecond) + continue + } + return err + } + break } defer tx.Rollback() //nolint:errcheck - stmtSelectEvent, err := tx.Prepare("SELECT MAX(id) from event WHERE height=? AND tipset_key=? and tipset_key_cid=? and emitter_addr=? and event_index=? and message_cid=? 
and message_index=? and reverted=false") - if err != nil { - return err - } - stmtEvent, err := tx.Prepare("INSERT INTO event (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)") - if err != nil { - return err - } - stmtEntry, err := tx.Prepare("INSERT INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)") - if err != nil { - return err - } - var eventsAffected int64 var entriesAffected int64 @@ -192,7 +232,7 @@ var backfillEventsCmd = &cli.Command{ // select the highest event id that exists in database, or null if none exists var entryID sql.NullInt64 - err = stmtSelectEvent.QueryRow( + err = tx.Stmt(stmtEventExists).QueryRow( currTs.Height(), currTs.Key().Bytes(), tsKeyCid.Bytes(), @@ -211,7 +251,7 @@ var backfillEventsCmd = &cli.Command{ } // event does not exist, lets backfill it - res, err := tx.Stmt(stmtEvent).Exec( + res, err := tx.Stmt(stmtInsertEvent).Exec( currTs.Height(), // height currTs.Key().Bytes(), // tipset_key tsKeyCid.Bytes(), // tipset_key_cid @@ -238,7 +278,7 @@ var backfillEventsCmd = &cli.Command{ // backfill the event entries for _, entry := range event.Entries { - _, err := tx.Stmt(stmtEntry).Exec( + _, err := tx.Stmt(stmtInsertEntry).Exec( entryID.Int64, // event_id isIndexedValue(entry.Flags), // indexed []byte{entry.Flags}, // flags @@ -312,6 +352,22 @@ var backfillEventsCmd = &cli.Command{ log.Infof("backfilling events complete, totalEventsAffected:%d, totalEntriesAffected:%d", totalEventsAffected, totalEntriesAffected) + if cctx.Bool("temporary-index") { + log.Info("dropping temporary index (tmp_event_backfill_index) on event table") + _, err := db.Exec("DROP INDEX IF EXISTS tmp_event_backfill_index;") + if err != nil { + fmt.Printf("ERROR: dropping index: %s", err) + } + } + + if cctx.Bool("vacuum") { + log.Info("running VACUUM on the database") + _, err := db.Exec("VACUUM;") + if err != nil { + return fmt.Errorf("failed 
to run VACUUM on the database: %w", err) + } + } + return nil }, } @@ -581,17 +637,17 @@ var backfillTxHashCmd = &cli.Command{ continue } - tx, err := ethtypes.EthTxFromSignedEthMessage(smsg) + tx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(smsg) if err != nil { return fmt.Errorf("failed to convert from signed message: %w at epoch: %d", err, epoch) } - tx.Hash, err = tx.TxHash() + hash, err := tx.TxHash() if err != nil { return fmt.Errorf("failed to calculate hash for ethTx: %w at epoch: %d", err, epoch) } - res, err := insertStmt.Exec(tx.Hash.String(), smsg.Cid().String()) + res, err := insertStmt.Exec(hash.String(), smsg.Cid().String()) if err != nil { return fmt.Errorf("error inserting tx mapping to db: %s at epoch: %d", err, epoch) } @@ -602,7 +658,7 @@ var backfillTxHashCmd = &cli.Command{ } if rowsAffected > 0 { - log.Debugf("Inserted txhash %s, cid: %s at epoch: %d", tx.Hash.String(), smsg.Cid().String(), epoch) + log.Debugf("Inserted txhash %s, cid: %s at epoch: %d", hash.String(), smsg.Cid().String(), epoch) } totalRowsAffected += rowsAffected diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 4770f714597..911da346e97 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -48,7 +48,6 @@ func main() { proofsCmd, verifRegCmd, marketCmd, - miscCmd, mpoolCmd, helloCmd, genesisVerifyCmd, @@ -92,13 +91,12 @@ func main() { mismatchesCmd, blockCmd, adlCmd, - lpUtilCmd, } app := &cli.App{ Name: "lotus-shed", Usage: "A place for all the lotus tools", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Commands: local, Flags: []cli.Flag{ &cli.StringFlag{ diff --git a/cmd/lotus-shed/miner-multisig.go b/cmd/lotus-shed/miner-multisig.go index e8394b17a60..0b7042e83a6 100644 --- a/cmd/lotus-shed/miner-multisig.go +++ b/cmd/lotus-shed/miner-multisig.go @@ -93,7 +93,7 @@ var mmProposeWithdrawBalance = &cli.Command{ return xerrors.Errorf("proposing message: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Propose 
Message CID:", pcid) + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) @@ -103,7 +103,7 @@ var mmProposeWithdrawBalance = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") return err } @@ -172,7 +172,7 @@ var mmApproveWithdrawBalance = &cli.Command{ return xerrors.Errorf("approving message: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid) + _, _ = fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence) @@ -182,7 +182,7 @@ var mmApproveWithdrawBalance = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") return err } @@ -253,7 +253,7 @@ var mmProposeChangeOwner = &cli.Command{ return xerrors.Errorf("proposing message: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) @@ -263,7 +263,7 @@ var mmProposeChangeOwner = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") return err } @@ -343,7 +343,7 @@ var mmApproveChangeOwner = &cli.Command{ return xerrors.Errorf("approving message: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid) + _, _ = fmt.Fprintln(cctx.App.Writer, "Approve 
Message CID:", acid) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence) @@ -353,7 +353,7 @@ var mmApproveChangeOwner = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") return err } @@ -416,8 +416,8 @@ var mmProposeChangeWorker = &cli.Command{ } } else { if mi.NewWorker == newAddr { - fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) - fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + _, _ = fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) + _, _ = fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) return fmt.Errorf("change to worker address %s already pending", na) } } @@ -427,8 +427,8 @@ var mmProposeChangeWorker = &cli.Command{ NewControlAddrs: mi.ControlAddresses, } - fmt.Fprintf(cctx.App.Writer, "newAddr: %s\n", newAddr) - fmt.Fprintf(cctx.App.Writer, "NewControlAddrs: %s\n", mi.ControlAddresses) + _, _ = fmt.Fprintf(cctx.App.Writer, "newAddr: %s\n", newAddr) + _, _ = fmt.Fprintf(cctx.App.Writer, "NewControlAddrs: %s\n", mi.ControlAddresses) sp, err := actors.SerializeParams(cwp) if err != nil { @@ -440,7 +440,7 @@ var mmProposeChangeWorker = &cli.Command{ return xerrors.Errorf("proposing message: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) @@ -450,7 +450,7 @@ var mmProposeChangeWorker = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Propose worker 
change tx failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") return err } @@ -632,7 +632,7 @@ var mmConfirmChangeWorker = &cli.Command{ return xerrors.Errorf("proposing message: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) @@ -642,7 +642,7 @@ var mmConfirmChangeWorker = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") return err } @@ -839,7 +839,7 @@ var mmProposeControlSet = &cli.Command{ return xerrors.Errorf("proposing message: %w", err) } - fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) @@ -849,7 +849,7 @@ var mmProposeControlSet = &cli.Command{ // check it executed successfully if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") + _, _ = fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") return err } diff --git a/cmd/lotus-shed/miner.go b/cmd/lotus-shed/miner.go index 2f9b4ecf103..827fbf41514 100644 --- a/cmd/lotus-shed/miner.go +++ b/cmd/lotus-shed/miner.go @@ -220,6 +220,13 @@ var minerCreateCmd = &cli.Command{ Name: "create", Usage: "sends a create miner message", ArgsUsage: "[sender] [owner] [worker] [sector size]", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "confidence", + Usage: "number of block confirmations to wait for", + Value: int(build.MessageConfidence), + }, + }, Action: func(cctx *cli.Context) error { wapi, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { @@ -274,7 +281,7 
@@ var minerCreateCmd = &cli.Command{ log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid()) log.Infof("Waiting for confirmation") - mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), uint64(cctx.Int("confidence"))) if err != nil { return xerrors.Errorf("waiting for worker init: %w", err) } diff --git a/cmd/lotus-shed/misc.go b/cmd/lotus-shed/misc.go deleted file mode 100644 index cfda362c497..00000000000 --- a/cmd/lotus-shed/misc.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "fmt" - "strconv" - - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/go-fil-markets/storagemarket" -) - -var miscCmd = &cli.Command{ - Name: "misc", - Usage: "Assorted unsorted commands for various purposes", - Flags: []cli.Flag{}, - Subcommands: []*cli.Command{ - dealStateMappingCmd, - }, -} - -var dealStateMappingCmd = &cli.Command{ - Name: "deal-state", - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) - } - - num, err := strconv.Atoi(cctx.Args().First()) - if err != nil { - return err - } - - ststr, ok := storagemarket.DealStates[uint64(num)] - if !ok { - return fmt.Errorf("no such deal state %d", num) - } - fmt.Println(ststr) - return nil - }, -} diff --git a/cmd/lotus-sim/info.go b/cmd/lotus-sim/info.go index b92fa4b2f4d..8438b67b4f1 100644 --- a/cmd/lotus-sim/info.go +++ b/cmd/lotus-sim/info.go @@ -66,17 +66,17 @@ func printInfo(ctx context.Context, sim *simulation.Simulation, out io.Writer) e startTime := time.Unix(int64(start.MinTimestamp()), 0) duration := headTime.Sub(startTime) - fmt.Fprintf(tw, "Num:\t%s\n", sim.Name()) - fmt.Fprintf(tw, "Head:\t%s\n", head) - fmt.Fprintf(tw, "Start Epoch:\t%d\n", firstEpoch) - fmt.Fprintf(tw, "End Epoch:\t%d\n", headEpoch) - fmt.Fprintf(tw, "Length:\t%d\n", headEpoch-firstEpoch) - fmt.Fprintf(tw, "Start Date:\t%s\n", startTime) - fmt.Fprintf(tw, 
"End Date:\t%s\n", headTime) - fmt.Fprintf(tw, "Duration:\t%.2f day(s)\n", duration.Hours()/24) - fmt.Fprintf(tw, "Capacity:\t%s\n", types.SizeStr(powerNow.RawBytePower)) - fmt.Fprintf(tw, "Daily Capacity Growth:\t%s/day\n", types.SizeStr(growthRate)) - fmt.Fprintf(tw, "Network Version:\t%d\n", sim.GetNetworkVersion()) + fmt.Fprintf(tw, "Num:\t%s\n", sim.Name()) //nolint:errcheck + fmt.Fprintf(tw, "Head:\t%s\n", head) //nolint:errcheck + fmt.Fprintf(tw, "Start Epoch:\t%d\n", firstEpoch) //nolint:errcheck + fmt.Fprintf(tw, "End Epoch:\t%d\n", headEpoch) //nolint:errcheck + fmt.Fprintf(tw, "Length:\t%d\n", headEpoch-firstEpoch) //nolint:errcheck + fmt.Fprintf(tw, "Start Date:\t%s\n", startTime) //nolint:errcheck + fmt.Fprintf(tw, "End Date:\t%s\n", headTime) //nolint:errcheck + fmt.Fprintf(tw, "Duration:\t%.2f day(s)\n", duration.Hours()/24) //nolint:errcheck + fmt.Fprintf(tw, "Capacity:\t%s\n", types.SizeStr(powerNow.RawBytePower)) //nolint:errcheck + fmt.Fprintf(tw, "Daily Capacity Growth:\t%s/day\n", types.SizeStr(growthRate)) //nolint:errcheck + fmt.Fprintf(tw, "Network Version:\t%d\n", sim.GetNetworkVersion()) //nolint:errcheck return tw.Flush() } diff --git a/cmd/lotus-sim/info_capacity.go b/cmd/lotus-sim/info_capacity.go index a92d2cde494..cca56ca57cc 100644 --- a/cmd/lotus-sim/info_capacity.go +++ b/cmd/lotus-sim/info_capacity.go @@ -60,7 +60,7 @@ var infoCapacityGrowthSimCommand = &cli.Command{ ) lastPower = newPower lastHeight = newEpoch - fmt.Fprintf(cctx.App.Writer, "%s/day\n", types.SizeStr(growthRate)) + _, _ = fmt.Fprintf(cctx.App.Writer, "%s/day\n", types.SizeStr(growthRate)) } return cctx.Err() }, diff --git a/cmd/lotus-sim/info_state.go b/cmd/lotus-sim/info_state.go index 125dae81d96..d6cb2ea4125 100644 --- a/cmd/lotus-sim/info_state.go +++ b/cmd/lotus-sim/info_state.go @@ -128,7 +128,7 @@ var infoStateGrowthSimCommand = &cli.Command{ return err } - fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize))) + 
_, _ = fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize))) } ts, err = sim.Node.Chainstore.LoadTipSet(cctx.Context, ts.Parents()) diff --git a/cmd/lotus-sim/info_wdpost.go b/cmd/lotus-sim/info_wdpost.go index 426e85ca831..19c4fa619bd 100644 --- a/cmd/lotus-sim/info_wdpost.go +++ b/cmd/lotus-sim/info_wdpost.go @@ -35,7 +35,7 @@ var infoWindowPostBandwidthSimCommand = &cli.Command{ var postGas, totalGas int64 printStats := func() { - fmt.Fprintf(cctx.App.Writer, "%.4f%%\n", float64(100*postGas)/float64(totalGas)) + _, _ = fmt.Fprintf(cctx.App.Writer, "%.4f%%\n", float64(100*postGas)/float64(totalGas)) } idx := 0 err = sim.Walk(cctx.Context, 0, func( diff --git a/cmd/lotus-sim/list.go b/cmd/lotus-sim/list.go index 37e767b9ab0..1273df54534 100644 --- a/cmd/lotus-sim/list.go +++ b/cmd/lotus-sim/list.go @@ -31,7 +31,7 @@ var listSimCommand = &cli.Command{ return err } head := sim.GetHead() - fmt.Fprintf(tw, "%s\t%s\t%s\n", name, head.Height(), head.Key()) + _, _ = fmt.Fprintf(tw, "%s\t%s\t%s\n", name, head.Height(), head.Key()) } return tw.Flush() }, diff --git a/cmd/lotus-sim/profile.go b/cmd/lotus-sim/profile.go index 63e0ef3bd86..e95fc4696c4 100644 --- a/cmd/lotus-sim/profile.go +++ b/cmd/lotus-sim/profile.go @@ -83,9 +83,9 @@ func profileOnSignal(cctx *cli.Context, signals ...os.Signal) { case context.Canceled: return case nil: - fmt.Fprintf(cctx.App.ErrWriter, "Wrote profile to %q\n", fname) + _, _ = fmt.Fprintf(cctx.App.ErrWriter, "Wrote profile to %q\n", fname) default: - fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to write profile: %s\n", err) + _, _ = fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to write profile: %s\n", err) } case <-cctx.Done(): return diff --git a/cmd/lotus-sim/run.go b/cmd/lotus-sim/run.go index a985fdf9ec9..91a8f3640e4 100644 --- a/cmd/lotus-sim/run.go +++ b/cmd/lotus-sim/run.go @@ -51,22 +51,22 @@ Signals: return err } - fmt.Fprintf(cctx.App.Writer, "advanced to %d %s\n", ts.Height(), 
ts.Key()) + _, _ = fmt.Fprintf(cctx.App.Writer, "advanced to %d %s\n", ts.Height(), ts.Key()) // Print select { case <-ch: - fmt.Fprintln(cctx.App.Writer, "---------------------") + _, _ = fmt.Fprintln(cctx.App.Writer, "---------------------") if err := printInfo(cctx.Context, sim, cctx.App.Writer); err != nil { - fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to print info: %s\n", err) + _, _ = fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to print info: %s\n", err) } - fmt.Fprintln(cctx.App.Writer, "---------------------") + _, _ = fmt.Fprintln(cctx.App.Writer, "---------------------") case <-cctx.Context.Done(): return cctx.Err() default: } } - fmt.Fprintln(cctx.App.Writer, "simulation done") + _, _ = fmt.Fprintln(cctx.App.Writer, "simulation done") return err }, } diff --git a/cmd/lotus-sim/upgrade.go b/cmd/lotus-sim/upgrade.go index dfc726d6b01..90f87baa347 100644 --- a/cmd/lotus-sim/upgrade.go +++ b/cmd/lotus-sim/upgrade.go @@ -48,10 +48,10 @@ var upgradeList = &cli.Command{ } tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0) - fmt.Fprintf(tw, "version\theight\tepochs\tmigration\texpensive") + _, _ = fmt.Fprintf(tw, "version\theight\tepochs\tmigration\texpensive") epoch := sim.GetHead().Height() for _, upgrade := range upgrades { - fmt.Fprintf( + _, _ = fmt.Fprintf( tw, "%d\t%d\t%+d\t%t\t%t", upgrade.Network, upgrade.Height, upgrade.Height-epoch, upgrade.Migration != nil, diff --git a/cmd/lotus-sim/util.go b/cmd/lotus-sim/util.go index cd15cca0dd8..811c9b6ebf1 100644 --- a/cmd/lotus-sim/util.go +++ b/cmd/lotus-sim/util.go @@ -10,9 +10,8 @@ import ( ) func open(cctx *cli.Context) (*simulation.Node, error) { - _, _, err := ulimit.ManageFdLimit() - if err != nil { - fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to raise ulimit: %s\n", err) + if _, _, err := ulimit.ManageFdLimit(); err != nil { + _, _ = fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to raise ulimit: %s\n", err) } return simulation.OpenNode(cctx.Context, cctx.String("repo")) } diff 
--git a/cmd/lotus-stats/main.go b/cmd/lotus-stats/main.go index 20971c1f3ef..16c62595228 100644 --- a/cmd/lotus-stats/main.go +++ b/cmd/lotus-stats/main.go @@ -42,7 +42,7 @@ func main() { app := &cli.App{ Name: "lotus-stats", Usage: "Collect basic information about a filecoin network using lotus", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Flags: []cli.Flag{ &cli.StringFlag{ Name: "lotus-path", diff --git a/cmd/lotus-wallet/main.go b/cmd/lotus-wallet/main.go index 8360dae15d0..84818ecb139 100644 --- a/cmd/lotus-wallet/main.go +++ b/cmd/lotus-wallet/main.go @@ -52,7 +52,7 @@ func main() { app := &cli.App{ Name: "lotus-wallet", Usage: "Basic external wallet", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), Description: ` lotus-wallet provides a remote wallet service for lotus. diff --git a/cmd/lotus-worker/main.go b/cmd/lotus-worker/main.go index 41af11bdd04..15283a30bb9 100644 --- a/cmd/lotus-worker/main.go +++ b/cmd/lotus-worker/main.go @@ -70,7 +70,7 @@ func main() { app := &cli.App{ Name: "lotus-worker", Usage: "Remote miner worker", - Version: build.UserVersion(), + Version: string(build.MinerUserVersion()), EnableBashCompletion: true, Flags: []cli.Flag{ &cli.StringFlag{ @@ -104,7 +104,7 @@ func main() { After: func(c *cli.Context) error { if r := recover(); r != nil { // Generate report in LOTUS_PANIC_REPORT_PATH and re-raise panic - build.GeneratePanicReport(c.String("panic-reports"), c.String(FlagWorkerRepo), c.App.Name) + build.GenerateMinerPanicReport(c.String("panic-reports"), c.String(FlagWorkerRepo), c.App.Name) panic(r) } return nil diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 369938020f5..ba2936e2a04 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -209,7 +209,7 @@ var DaemonCmd = &cli.Command{ } ctx, _ := tag.New(context.Background(), - tag.Insert(metrics.Version, build.BuildVersion), + tag.Insert(metrics.Version, build.NodeBuildVersion), 
tag.Insert(metrics.Commit, build.CurrentCommit), tag.Insert(metrics.NodeType, "chain"), ) diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index fce9a6136f4..6b29f6cf83a 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -72,7 +72,7 @@ func main() { app := &cli.App{ Name: "lotus", Usage: "Filecoin decentralized storage network client", - Version: build.UserVersion(), + Version: string(build.NodeUserVersion()), EnableBashCompletion: true, Flags: []cli.Flag{ &cli.StringFlag{ @@ -107,7 +107,7 @@ func main() { After: func(c *cli.Context) error { if r := recover(); r != nil { // Generate report in LOTUS_PATH and re-raise panic - build.GeneratePanicReport(c.String("panic-reports"), c.String("repo"), c.App.Name) + build.GenerateNodePanicReport(c.String("panic-reports"), c.String("repo"), c.App.Name) panic(r) } return nil diff --git a/cmd/sptool/actor.go b/cmd/sptool/actor.go deleted file mode 100644 index f17568a3115..00000000000 --- a/cmd/sptool/actor.go +++ /dev/null @@ -1,141 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "github.com/fatih/color" - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/go-address" - - builtin2 "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cli/spcli" - "github.com/filecoin-project/lotus/lib/tablewriter" -) - -var actorCmd = &cli.Command{ - Name: "actor", - Usage: "Manage Filecoin Miner Actor Metadata", - Subcommands: []*cli.Command{ - spcli.ActorSetAddrsCmd(SPTActorGetter), - spcli.ActorWithdrawCmd(SPTActorGetter), - spcli.ActorRepayDebtCmd(SPTActorGetter), - spcli.ActorSetPeeridCmd(SPTActorGetter), - spcli.ActorSetOwnerCmd(SPTActorGetter), - spcli.ActorControlCmd(SPTActorGetter, actorControlListCmd(SPTActorGetter)), - spcli.ActorProposeChangeWorkerCmd(SPTActorGetter), - spcli.ActorConfirmChangeWorkerCmd(SPTActorGetter), - 
spcli.ActorCompactAllocatedCmd(SPTActorGetter), - spcli.ActorProposeChangeBeneficiaryCmd(SPTActorGetter), - spcli.ActorConfirmChangeBeneficiaryCmd(SPTActorGetter), - spcli.ActorNewMinerCmd, - }, -} - -func actorControlListCmd(getActor spcli.ActorAddressGetter) *cli.Command { - return &cli.Command{ - Name: "list", - Usage: "Get currently set control addresses. Note: This excludes most roles as they are not known to the immediate chain state.", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "verbose", - }, - }, - Action: func(cctx *cli.Context) error { - api, acloser, err := lcli.GetFullNodeAPIV1(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActor(cctx) - if err != nil { - return err - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - tw := tablewriter.New( - tablewriter.Col("name"), - tablewriter.Col("ID"), - tablewriter.Col("key"), - tablewriter.Col("use"), - tablewriter.Col("balance"), - ) - - post := map[address.Address]struct{}{} - - for _, ca := range mi.ControlAddresses { - post[ca] = struct{}{} - } - - printKey := func(name string, a address.Address) { - var actor *types.Actor - if actor, err = api.StateGetActor(ctx, a, types.EmptyTSK); err != nil { - fmt.Printf("%s\t%s: error getting actor: %s\n", name, a, err) - return - } - b := actor.Balance - - var k = a - // 'a' maybe a 'robust', in that case, 'StateAccountKey' returns an error. - if builtin2.IsAccountActor(actor.Code) { - if k, err = api.StateAccountKey(ctx, a, types.EmptyTSK); err != nil { - fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err) - return - } - } - kstr := k.String() - if !cctx.Bool("verbose") { - if len(kstr) > 9 { - kstr = kstr[:6] + "..." 
- } - } - - bstr := types.FIL(b).String() - switch { - case b.LessThan(types.FromFil(10)): - bstr = color.RedString(bstr) - case b.LessThan(types.FromFil(50)): - bstr = color.YellowString(bstr) - default: - bstr = color.GreenString(bstr) - } - - var uses []string - if a == mi.Worker { - uses = append(uses, color.YellowString("other")) - } - if _, ok := post[a]; ok { - uses = append(uses, color.GreenString("post")) - } - - tw.Write(map[string]interface{}{ - "name": name, - "ID": a, - "key": kstr, - "use": strings.Join(uses, " "), - "balance": bstr, - }) - } - - printKey("owner", mi.Owner) - printKey("worker", mi.Worker) - printKey("beneficiary", mi.Beneficiary) - for i, ca := range mi.ControlAddresses { - printKey(fmt.Sprintf("control-%d", i), ca) - } - - return tw.Flush(os.Stdout) - }, - } -} diff --git a/cmd/sptool/main.go b/cmd/sptool/main.go deleted file mode 100644 index 7970b8db37c..00000000000 --- a/cmd/sptool/main.go +++ /dev/null @@ -1,84 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - - logging "github.com/ipfs/go-log/v2" - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/cli/spcli" -) - -var log = logging.Logger("sptool") - -func main() { - local := []*cli.Command{ - actorCmd, - spcli.InfoCmd(SPTActorGetter), - sectorsCmd, - provingCmd, - //multiSigCmd, - } - - app := &cli.App{ - Name: "sptool", - Usage: "Manage Filecoin Miner Actor", - Version: build.UserVersion(), - Commands: local, - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - EnvVars: []string{"LOTUS_PATH"}, - Hidden: true, - Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME - }, - &cli.StringFlag{ - Name: "log-level", - Value: "info", - }, - &cli.StringFlag{ - Name: "actor", - Required: os.Getenv("LOTUS_DOCS_GENERATION") != "1", - Usage: "miner actor to manage", - EnvVars: []string{"SP_ADDRESS"}, - }, - }, - Before: func(cctx *cli.Context) error { - 
return logging.SetLogLevel("sptool", cctx.String("sptool")) - }, - } - - // terminate early on ctrl+c - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - ctx, cancel := context.WithCancel(context.Background()) - go func() { - <-c - cancel() - fmt.Println("Received interrupt, shutting down... Press CTRL+C again to force shutdown") - <-c - fmt.Println("Forcing stop") - os.Exit(1) - }() - - if err := app.RunContext(ctx, os.Args); err != nil { - log.Errorf("%+v", err) - os.Exit(1) - return - } - -} - -func SPTActorGetter(cctx *cli.Context) (address.Address, error) { - addr, err := address.NewFromString(cctx.String("actor")) - if err != nil { - return address.Undef, fmt.Errorf("parsing address: %w", err) - } - return addr, nil -} diff --git a/cmd/sptool/proving.go b/cmd/sptool/proving.go deleted file mode 100644 index 87c67b5f4e5..00000000000 --- a/cmd/sptool/proving.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/lotus/cli/spcli" -) - -var provingCmd = &cli.Command{ - Name: "proving", - Usage: "View proving information", - Subcommands: []*cli.Command{ - spcli.ProvingInfoCmd(SPTActorGetter), - spcli.ProvingDeadlinesCmd(SPTActorGetter), - spcli.ProvingDeadlineInfoCmd(SPTActorGetter), - spcli.ProvingFaultsCmd(SPTActorGetter), - }, -} diff --git a/cmd/sptool/sector.go b/cmd/sptool/sector.go deleted file mode 100644 index 8f341b5ccc2..00000000000 --- a/cmd/sptool/sector.go +++ /dev/null @@ -1,355 +0,0 @@ -package main - -import ( - "fmt" - "os" - "sort" - - "github.com/docker/go-units" - "github.com/fatih/color" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/actors/adt" - 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cli/spcli" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/lib/tablewriter" -) - -var sectorsCmd = &cli.Command{ - Name: "sectors", - Usage: "interact with sector store", - Subcommands: []*cli.Command{ - spcli.SectorsStatusCmd(SPTActorGetter, nil), - sectorsListCmd, // in-house b/c chain-only is so different. Needs Curio *web* implementation - spcli.SectorPreCommitsCmd(SPTActorGetter), - spcli.SectorsCheckExpireCmd(SPTActorGetter), - sectorsExpiredCmd, // in-house b/c chain-only is so different - spcli.SectorsExtendCmd(SPTActorGetter), - spcli.TerminateSectorCmd(SPTActorGetter), - spcli.SectorsCompactPartitionsCmd(SPTActorGetter), - }} - -var sectorsExpiredCmd = &cli.Command{ - Name: "expired", - Usage: "Get or cleanup expired sectors", - Flags: []cli.Flag{ - &cli.Int64Flag{ - Name: "expired-epoch", - Usage: "epoch at which to check sector expirations", - DefaultText: "WinningPoSt lookback epoch", - }, - }, - Action: func(cctx *cli.Context) error { - fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return xerrors.Errorf("getting fullnode api: %w", err) - } - defer nCloser() - ctx := lcli.ReqContext(cctx) - - head, err := fullApi.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("getting chain head: %w", err) - } - - lbEpoch := abi.ChainEpoch(cctx.Int64("expired-epoch")) - if !cctx.IsSet("expired-epoch") { - nv, err := fullApi.StateNetworkVersion(ctx, head.Key()) - if err != nil { - return xerrors.Errorf("getting network version: %w", err) - } - - lbEpoch = head.Height() - policy.GetWinningPoStSectorSetLookback(nv) - if lbEpoch < 0 { - return xerrors.Errorf("too early to terminate sectors") - } - } - - if cctx.IsSet("confirm-remove-count") && 
!cctx.IsSet("expired-epoch") { - return xerrors.Errorf("--expired-epoch must be specified with --confirm-remove-count") - } - - lbts, err := fullApi.ChainGetTipSetByHeight(ctx, lbEpoch, head.Key()) - if err != nil { - return xerrors.Errorf("getting lookback tipset: %w", err) - } - - maddr, err := SPTActorGetter(cctx) - if err != nil { - return xerrors.Errorf("getting actor address: %w", err) - } - - // toCheck is a working bitfield which will only contain terminated sectors - toCheck := bitfield.New() - { - sectors, err := fullApi.StateMinerSectors(ctx, maddr, nil, lbts.Key()) - if err != nil { - return xerrors.Errorf("getting sector on chain info: %w", err) - } - - for _, sector := range sectors { - if sector.Expiration <= lbts.Height() { - toCheck.Set(uint64(sector.SectorNumber)) - } - } - } - - mact, err := fullApi.StateGetActor(ctx, maddr, lbts.Key()) - if err != nil { - return err - } - - tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) - mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) - if err != nil { - return err - } - - alloc, err := mas.GetAllocatedSectors() - if err != nil { - return xerrors.Errorf("getting allocated sectors: %w", err) - } - - // only allocated sectors can be expired, - toCheck, err = bitfield.IntersectBitField(toCheck, *alloc) - if err != nil { - return xerrors.Errorf("intersecting bitfields: %w", err) - } - - if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { - live, err := part.LiveSectors() - if err != nil { - return err - } - - toCheck, err = bitfield.SubtractBitField(toCheck, live) - if err != nil { - return err - } - - unproven, err := part.UnprovenSectors() - if err != nil { - return err - } - - toCheck, err = bitfield.SubtractBitField(toCheck, unproven) - - return err - }) - }); err != nil { - return err - } - - err = 
mas.ForEachPrecommittedSector(func(pci miner.SectorPreCommitOnChainInfo) error { - toCheck.Unset(uint64(pci.Info.SectorNumber)) - return nil - }) - if err != nil { - return err - } - - // toCheck now only contains sectors which either failed to precommit or are expired/terminated - fmt.Printf("Sectors that either failed to precommit or are expired/terminated:\n") - - err = toCheck.ForEach(func(u uint64) error { - fmt.Println(abi.SectorNumber(u)) - - return nil - }) - if err != nil { - return err - } - - return nil - }, -} - -var sectorsListCmd = &cli.Command{ - Name: "list", - Usage: "List sectors", - Flags: []cli.Flag{ - /* - &cli.BoolFlag{ - Name: "show-removed", - Usage: "show removed sectors", - Aliases: []string{"r"}, - }, - &cli.BoolFlag{ - Name: "fast", - Usage: "don't show on-chain info for better performance", - Aliases: []string{"f"}, - }, - &cli.BoolFlag{ - Name: "events", - Usage: "display number of events the sector has received", - Aliases: []string{"e"}, - }, - &cli.BoolFlag{ - Name: "initial-pledge", - Usage: "display initial pledge", - Aliases: []string{"p"}, - }, - &cli.BoolFlag{ - Name: "seal-time", - Usage: "display how long it took for the sector to be sealed", - Aliases: []string{"t"}, - }, - &cli.StringFlag{ - Name: "states", - Usage: "filter sectors by a comma-separated list of states", - }, - &cli.BoolFlag{ - Name: "unproven", - Usage: "only show sectors which aren't in the 'Proving' state", - Aliases: []string{"u"}, - }, - */ - }, - Subcommands: []*cli.Command{ - //sectorsListUpgradeBoundsCmd, - }, - Action: func(cctx *cli.Context) error { - fullApi, closer2, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config - if err != nil { - return err - } - defer closer2() - - ctx := lcli.ReqContext(cctx) - - maddr, err := SPTActorGetter(cctx) - if err != nil { - return err - } - - head, err := fullApi.ChainHead(ctx) - if err != nil { - return err - } - - activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, 
head.Key()) - if err != nil { - return err - } - activeIDs := make(map[abi.SectorNumber]struct{}, len(activeSet)) - for _, info := range activeSet { - activeIDs[info.SectorNumber] = struct{}{} - } - - sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, head.Key()) - if err != nil { - return err - } - commitedIDs := make(map[abi.SectorNumber]struct{}, len(sset)) - for _, info := range sset { - commitedIDs[info.SectorNumber] = struct{}{} - } - - sort.Slice(sset, func(i, j int) bool { - return sset[i].SectorNumber < sset[j].SectorNumber - }) - - tw := tablewriter.New( - tablewriter.Col("ID"), - tablewriter.Col("State"), - tablewriter.Col("OnChain"), - tablewriter.Col("Active"), - tablewriter.Col("Expiration"), - tablewriter.Col("SealTime"), - tablewriter.Col("Events"), - tablewriter.Col("Deals"), - tablewriter.Col("DealWeight"), - tablewriter.Col("VerifiedPower"), - tablewriter.Col("Pledge"), - tablewriter.NewLineCol("Error"), - tablewriter.NewLineCol("RecoveryTimeout")) - - fast := cctx.Bool("fast") - - for _, st := range sset { - s := st.SectorNumber - _, inSSet := commitedIDs[s] - _, inASet := activeIDs[s] - - const verifiedPowerGainMul = 9 - dw, vp := .0, .0 - { - rdw := big.Add(st.DealWeight, st.VerifiedDealWeight) - dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) - vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) - } - - var deals int - for _, deal := range st.DealIDs { - if deal != 0 { - deals++ - } - } - - exp := st.Expiration - // if st.OnTime > 0 && st.OnTime < exp { - // exp = st.OnTime // Can be different when the sector was CC upgraded - // } - - m := map[string]interface{}{ - "ID": s, - //"State": color.New(spcli.StateOrder[sealing.SectorState(st.State)].Col).Sprint(st.State), - "OnChain": yesno(inSSet), - "Active": yesno(inASet), - } - - if deals > 0 { - m["Deals"] = color.GreenString("%d", deals) - } else { - 
m["Deals"] = color.BlueString("CC") - // if st.ToUpgrade { - // m["Deals"] = color.CyanString("CC(upgrade)") - // } - } - - if !fast { - if !inSSet { - m["Expiration"] = "n/a" - } else { - m["Expiration"] = cliutil.EpochTime(head.Height(), exp) - // if st.Early > 0 { - // m["RecoveryTimeout"] = color.YellowString(cliutil.EpochTime(head.Height(), st.Early)) - // } - } - if inSSet && cctx.Bool("initial-pledge") { - m["Pledge"] = types.FIL(st.InitialPledge).Short() - } - } - - if !fast && deals > 0 { - m["DealWeight"] = units.BytesSize(dw) - if vp > 0 { - m["VerifiedPower"] = color.GreenString(units.BytesSize(vp)) - } - } - - tw.Write(m) - } - - return tw.Flush(os.Stdout) - }, -} - -func yesno(b bool) string { - if b { - return color.GreenString("YES") - } - return color.RedString("NO") -} diff --git a/curiosrc/address.go b/curiosrc/address.go deleted file mode 100644 index 6d1738f2dc3..00000000000 --- a/curiosrc/address.go +++ /dev/null @@ -1,64 +0,0 @@ -package curio - -import ( - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/node/config" -) - -func AddressSelector(addrConf []config.CurioAddresses) func() (*multictladdr.MultiAddressSelector, error) { - return func() (*multictladdr.MultiAddressSelector, error) { - as := &multictladdr.MultiAddressSelector{ - MinerMap: make(map[address.Address]api.AddressConfig), - } - if addrConf == nil { - return as, nil - } - - for _, addrConf := range addrConf { - for _, minerID := range addrConf.MinerAddresses { - tmp := api.AddressConfig{ - DisableOwnerFallback: addrConf.DisableOwnerFallback, - DisableWorkerFallback: addrConf.DisableWorkerFallback, - } - - for _, s := range addrConf.PreCommitControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing precommit control address: %w", err) - } - - tmp.PreCommitControl = 
append(tmp.PreCommitControl, addr) - } - - for _, s := range addrConf.CommitControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing commit control address: %w", err) - } - - tmp.CommitControl = append(tmp.CommitControl, addr) - } - - for _, s := range addrConf.TerminateControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing terminate control address: %w", err) - } - - tmp.TerminateControl = append(tmp.TerminateControl, addr) - } - a, err := address.NewFromString(minerID) - if err != nil { - return nil, xerrors.Errorf("parsing miner address %s: %w", minerID, err) - } - as.MinerMap[a] = tmp - } - } - return as, nil - } -} diff --git a/curiosrc/builder.go b/curiosrc/builder.go deleted file mode 100644 index 3cd4bd0cdd2..00000000000 --- a/curiosrc/builder.go +++ /dev/null @@ -1,46 +0,0 @@ -package curio - -import ( - "context" - "time" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/curiosrc/window" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -//var log = logging.Logger("provider") - -func WindowPostScheduler(ctx context.Context, fc config.CurioFees, pc config.CurioProvingConfig, - api api.FullNode, verif storiface.Verifier, lw *sealer.LocalWorker, sender *message.Sender, chainSched *chainsched.CurioChainSched, - as *multictladdr.MultiAddressSelector, addresses map[dtypes.MinerAddress]bool, db *harmonydb.DB, - stor paths.Store, idx paths.SectorIndex, max int) 
(*window.WdPostTask, *window.WdPostSubmitTask, *window.WdPostRecoverDeclareTask, error) { - - // todo config - ft := window.NewSimpleFaultTracker(stor, idx, pc.ParallelCheckLimit, time.Duration(pc.SingleCheckTimeout), time.Duration(pc.PartitionCheckTimeout)) - - computeTask, err := window.NewWdPostTask(db, api, ft, lw, verif, chainSched, addresses, max) - if err != nil { - return nil, nil, nil, err - } - - submitTask, err := window.NewWdPostSubmitTask(chainSched, sender, db, api, fc.MaxWindowPoStGasFee, as) - if err != nil { - return nil, nil, nil, err - } - - recoverTask, err := window.NewWdPostRecoverDeclareTask(sender, db, api, ft, as, chainSched, fc.MaxWindowPoStGasFee, addresses) - if err != nil { - return nil, nil, nil, err - } - - return computeTask, submitTask, recoverTask, nil -} diff --git a/curiosrc/chainsched/chain_sched.go b/curiosrc/chainsched/chain_sched.go deleted file mode 100644 index 42a387fbc2a..00000000000 --- a/curiosrc/chainsched/chain_sched.go +++ /dev/null @@ -1,136 +0,0 @@ -package chainsched - -import ( - "context" - "time" - - logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" -) - -var log = logging.Logger("curio/chainsched") - -type NodeAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - ChainNotify(context.Context) (<-chan []*api.HeadChange, error) -} - -type CurioChainSched struct { - api NodeAPI - - callbacks []UpdateFunc - started bool -} - -func New(api NodeAPI) *CurioChainSched { - return &CurioChainSched{ - api: api, - } -} - -type UpdateFunc func(ctx context.Context, revert, apply *types.TipSet) error - -func (s *CurioChainSched) AddHandler(ch UpdateFunc) error { - if s.started { - return xerrors.Errorf("cannot add handler after start") - } - - s.callbacks = append(s.callbacks, ch) - 
return nil -} - -func (s *CurioChainSched) Run(ctx context.Context) { - s.started = true - - var ( - notifs <-chan []*api.HeadChange - err error - gotCur bool - ) - - // not fine to panic after this point - for { - if notifs == nil { - notifs, err = s.api.ChainNotify(ctx) - if err != nil { - log.Errorf("ChainNotify error: %+v", err) - - build.Clock.Sleep(10 * time.Second) - continue - } - - gotCur = false - log.Info("restarting chain scheduler") - } - - select { - case changes, ok := <-notifs: - if !ok { - log.Warn("chain notifs channel closed") - notifs = nil - continue - } - - if !gotCur { - if len(changes) != 1 { - log.Errorf("expected first notif to have len = 1") - continue - } - chg := changes[0] - if chg.Type != store.HCCurrent { - log.Errorf("expected first notif to tell current ts") - continue - } - - ctx, span := trace.StartSpan(ctx, "CurioChainSched.headChange") - - s.update(ctx, nil, chg.Val) - - span.End() - gotCur = true - continue - } - - ctx, span := trace.StartSpan(ctx, "CurioChainSched.headChange") - - var lowest, highest *types.TipSet = nil, nil - - for _, change := range changes { - if change.Val == nil { - log.Errorf("change.Val was nil") - } - switch change.Type { - case store.HCRevert: - lowest = change.Val - case store.HCApply: - highest = change.Val - } - } - - s.update(ctx, lowest, highest) - - span.End() - case <-ctx.Done(): - return - } - } -} - -func (s *CurioChainSched) update(ctx context.Context, revert, apply *types.TipSet) { - if apply == nil { - log.Error("no new tipset in CurioChainSched.update") - return - } - - for _, ch := range s.callbacks { - if err := ch(ctx, revert, apply); err != nil { - log.Errorf("handling head updates in curio chain sched: %+v", err) - } - } -} diff --git a/curiosrc/ffi/piece_funcs.go b/curiosrc/ffi/piece_funcs.go deleted file mode 100644 index a548f5cc2df..00000000000 --- a/curiosrc/ffi/piece_funcs.go +++ /dev/null @@ -1,76 +0,0 @@ -package ffi - -import ( - "context" - "io" - "os" - "time" - - 
"golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func (sb *SealCalls) WritePiece(ctx context.Context, taskID *harmonytask.TaskID, pieceID storiface.PieceNumber, size int64, data io.Reader) error { - // todo: config(?): allow setting PathStorage for this - // todo storage reservations - paths, _, done, err := sb.sectors.AcquireSector(ctx, taskID, pieceID.Ref(), storiface.FTNone, storiface.FTPiece, storiface.PathSealing) - if err != nil { - return err - } - defer done() - - dest := paths.Piece - tempDest := dest + ".tmp" - - destFile, err := os.OpenFile(tempDest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return xerrors.Errorf("creating temp piece file '%s': %w", tempDest, err) - } - - removeTemp := true - defer func() { - if removeTemp { - rerr := os.Remove(tempDest) - if rerr != nil { - log.Errorf("removing temp file: %+v", rerr) - } - } - }() - - copyStart := time.Now() - - n, err := io.CopyBuffer(destFile, io.LimitReader(data, size), make([]byte, 8<<20)) - if err != nil { - _ = destFile.Close() - return xerrors.Errorf("copying piece data: %w", err) - } - - if err := destFile.Close(); err != nil { - return xerrors.Errorf("closing temp piece file: %w", err) - } - - if n != size { - return xerrors.Errorf("short write: %d", n) - } - - copyEnd := time.Now() - - log.Infow("wrote parked piece", "piece", pieceID, "size", size, "duration", copyEnd.Sub(copyStart), "dest", dest, "MiB/s", float64(size)/(1<<20)/copyEnd.Sub(copyStart).Seconds()) - - if err := os.Rename(tempDest, dest); err != nil { - return xerrors.Errorf("rename temp piece to dest %s -> %s: %w", tempDest, dest, err) - } - - removeTemp = false - return nil -} - -func (sb *SealCalls) PieceReader(ctx context.Context, id storiface.PieceNumber) (io.ReadCloser, error) { - return sb.sectors.storage.ReaderSeq(ctx, id.Ref(), storiface.FTPiece) -} - -func (sb *SealCalls) RemovePiece(ctx 
context.Context, id storiface.PieceNumber) error { - return sb.sectors.storage.Remove(ctx, id.Ref().ID, storiface.FTPiece, true, nil) -} diff --git a/curiosrc/ffi/sdr_funcs.go b/curiosrc/ffi/sdr_funcs.go deleted file mode 100644 index e9ce62831de..00000000000 --- a/curiosrc/ffi/sdr_funcs.go +++ /dev/null @@ -1,627 +0,0 @@ -package ffi - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/KarpelesLab/reflink" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/puzpuzpuz/xsync/v2" - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-state-types/abi" - proof2 "github.com/filecoin-project/go-state-types/proof" - - "github.com/filecoin-project/lotus/curiosrc/proof" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/proofpaths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("lpffi") - -/* -type ExternPrecommit2 func(ctx context.Context, sector storiface.SectorRef, cache, sealed string, pc1out storiface.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) - - type ExternalSealer struct { - PreCommit2 ExternPrecommit2 - } -*/ -type SealCalls struct { - sectors *storageProvider - - /*// externCalls cointain overrides for calling alternative sealing logic - externCalls ExternalSealer*/ -} - -func NewSealCalls(st *paths.Remote, ls *paths.Local, si paths.SectorIndex) *SealCalls { - return &SealCalls{ - sectors: &storageProvider{ - storage: st, - localStore: ls, - sindex: si, - storageReservations: xsync.NewIntegerMapOf[harmonytask.TaskID, *StorageReservation](), - }, - } -} - -type storageProvider struct { - storage *paths.Remote - localStore *paths.Local - sindex paths.SectorIndex - storageReservations 
*xsync.MapOf[harmonytask.TaskID, *StorageReservation] -} - -func (l *storageProvider) AcquireSector(ctx context.Context, taskID *harmonytask.TaskID, sector storiface.SectorRef, existing, allocate storiface.SectorFileType, sealing storiface.PathType) (fspaths, ids storiface.SectorPaths, release func(), err error) { - var paths, storageIDs storiface.SectorPaths - var releaseStorage func() - - var ok bool - var resv *StorageReservation - if taskID != nil { - resv, ok = l.storageReservations.Load(*taskID) - } - if ok && resv != nil { - if resv.Alloc != allocate || resv.Existing != existing { - // this should never happen, only when task definition is wrong - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("storage reservation type mismatch") - } - - log.Debugw("using existing storage reservation", "task", taskID, "sector", sector, "existing", existing, "allocate", allocate) - - paths = resv.Paths - storageIDs = resv.PathIDs - releaseStorage = resv.Release - - if len(existing.AllSet()) > 0 { - // there are some "existing" files in the reservation. Some of them may need fetching, so call l.storage.AcquireSector - // (which unlike in the reservation code will be called on the paths.Remote instance) to ensure that the files are - // present locally. Note that we do not care about 'allocate' reqeuests, those files don't exist, and are just - // proposed paths with a reservation of space. 
- - _, checkPathIDs, err := l.storage.AcquireSector(ctx, sector, existing, storiface.FTNone, sealing, storiface.AcquireMove, storiface.AcquireInto(storiface.PathsWithIDs{Paths: paths, IDs: storageIDs})) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: %w", err) - } - - // assert that checkPathIDs is the same as storageIDs - if storageIDs.Subset(existing) != checkPathIDs.Subset(existing) { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: pathIDs mismatch %#v != %#v", storageIDs, checkPathIDs) - } - } - } else { - // No related reservation, acquire storage as usual - - var err error - paths, storageIDs, err = l.storage.AcquireSector(ctx, sector, existing, allocate, sealing, storiface.AcquireMove) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, err - } - - releaseStorage, err = l.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) - } - } - - log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) - - return paths, storageIDs, func() { - releaseStorage() - - for _, fileType := range storiface.PathTypes { - if fileType&allocate == 0 { - continue - } - - sid := storiface.PathByType(storageIDs, fileType) - if err := l.sindex.StorageDeclareSector(ctx, storiface.ID(sid), sector.ID, fileType, true); err != nil { - log.Errorf("declare sector error: %+v", err) - } - } - }, nil -} - -func (sb *SealCalls) GenerateSDR(ctx context.Context, taskID harmonytask.TaskID, sector storiface.SectorRef, ticket abi.SealRandomness, commKcid cid.Cid) error { - paths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, &taskID, sector, storiface.FTNone, storiface.FTCache, storiface.PathSealing) - if err != nil { - return 
xerrors.Errorf("acquiring sector paths: %w", err) - } - defer releaseSector() - - // prepare SDR params - commp, err := commcid.CIDToDataCommitmentV1(commKcid) - if err != nil { - return xerrors.Errorf("computing commK: %w", err) - } - - replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp) - if err != nil { - return xerrors.Errorf("computing replica id: %w", err) - } - - // make sure the cache dir is empty - if err := os.RemoveAll(paths.Cache); err != nil { - return xerrors.Errorf("removing cache dir: %w", err) - } - if err := os.MkdirAll(paths.Cache, 0755); err != nil { - return xerrors.Errorf("mkdir cache dir: %w", err) - } - - // generate new sector key - err = ffi.GenerateSDR( - sector.ProofType, - paths.Cache, - replicaID, - ) - if err != nil { - return xerrors.Errorf("generating SDR %d (%s): %w", sector.ID.Number, paths.Unsealed, err) - } - - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache); err != nil { - return xerrors.Errorf("ensure one copy: %w", err) - } - - return nil -} - -// ensureOneCopy makes sure that there is only one version of sector data. -// Usually called after a successful operation was done successfully on sector data. 
-func (sb *SealCalls) ensureOneCopy(ctx context.Context, sid abi.SectorID, pathIDs storiface.SectorPaths, fts storiface.SectorFileType) error { - if !pathIDs.HasAllSet(fts) { - return xerrors.Errorf("ensure one copy: not all paths are set") - } - - for _, fileType := range fts.AllSet() { - pid := storiface.PathByType(pathIDs, fileType) - keepIn := []storiface.ID{storiface.ID(pid)} - - log.Debugw("ensureOneCopy", "sector", sid, "type", fileType, "keep", keepIn) - - if err := sb.sectors.storage.Remove(ctx, sid, fileType, true, keepIn); err != nil { - return err - } - } - - return nil -} - -func (sb *SealCalls) TreeDRC(ctx context.Context, task *harmonytask.TaskID, sector storiface.SectorRef, unsealed cid.Cid, size abi.PaddedPieceSize, data io.Reader, unpaddedData bool) (scid cid.Cid, ucid cid.Cid, err error) { - p1o, err := sb.makePhase1Out(unsealed, sector.ProofType) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("make phase1 output: %w", err) - } - - paths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, task, sector, storiface.FTCache, storiface.FTSealed, storiface.PathSealing) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("acquiring sector paths: %w", err) - } - defer releaseSector() - - defer func() { - if err != nil { - clerr := removeDRCTrees(paths.Cache) - if clerr != nil { - log.Errorw("removing tree files after TreeDRC error", "error", clerr, "exec-error", err, "sector", sector, "cache", paths.Cache) - } - } - }() - - treeDUnsealed, err := proof.BuildTreeD(data, unpaddedData, filepath.Join(paths.Cache, proofpaths.TreeDName), size) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("building tree-d: %w", err) - } - - if treeDUnsealed != unsealed { - return cid.Undef, cid.Undef, xerrors.Errorf("tree-d cid mismatch with supplied unsealed cid") - } - - { - // create sector-sized file at paths.Sealed; PC2 transforms it into a sealed sector in-place - ssize, err := sector.ProofType.SectorSize() - if err 
!= nil { - return cid.Undef, cid.Undef, xerrors.Errorf("getting sector size: %w", err) - } - - { - // copy TreeD prefix to sealed sector, SealPreCommitPhase2 will mutate it in place into the sealed sector - - // first try reflink + truncate, that should be way faster - err := reflink.Always(filepath.Join(paths.Cache, proofpaths.TreeDName), paths.Sealed) - if err == nil { - err = os.Truncate(paths.Sealed, int64(ssize)) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("truncating reflinked sealed file: %w", err) - } - } else { - log.Errorw("reflink treed -> sealed failed, falling back to slow copy, use single scratch btrfs or xfs filesystem", "error", err, "sector", sector, "cache", paths.Cache, "sealed", paths.Sealed) - - // fallback to slow copy, copy ssize bytes from treed to sealed - dst, err := os.OpenFile(paths.Sealed, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("opening sealed sector file: %w", err) - } - src, err := os.Open(filepath.Join(paths.Cache, proofpaths.TreeDName)) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("opening treed sector file: %w", err) - } - - _, err = io.CopyN(dst, src, int64(ssize)) - derr := dst.Close() - _ = src.Close() - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("copying treed -> sealed: %w", err) - } - if derr != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("closing sealed file: %w", derr) - } - } - } - } - - sl, uns, err := ffi.SealPreCommitPhase2(p1o, paths.Cache, paths.Sealed) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("computing seal proof: %w", err) - } - - if uns != unsealed { - return cid.Undef, cid.Undef, xerrors.Errorf("unsealed cid changed after sealing") - } - - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache|storiface.FTSealed); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("ensure one copy: %w", err) - } - - return sl, uns, nil -} - -func 
removeDRCTrees(cache string) error { - // list files in cache - files, err := os.ReadDir(cache) - if err != nil { - return xerrors.Errorf("listing cache: %w", err) - } - - for _, file := range files { - if proofpaths.IsTreeFile(file.Name()) { - err := os.Remove(filepath.Join(cache, file.Name())) - if err != nil { - return xerrors.Errorf("removing tree file: %w", err) - } - } - } - - return nil -} - -func (sb *SealCalls) GenerateSynthPoRep() { - panic("todo") -} - -func (sb *SealCalls) PoRepSnark(ctx context.Context, sn storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error) { - vproof, err := sb.sectors.storage.GeneratePoRepVanillaProof(ctx, sn, sealed, unsealed, ticket, seed) - if err != nil { - return nil, xerrors.Errorf("failed to generate vanilla proof: %w", err) - } - - proof, err := ffi.SealCommitPhase2(vproof, sn.ID.Number, sn.ID.Miner) - if err != nil { - return nil, xerrors.Errorf("computing seal proof failed: %w", err) - } - - ok, err := ffi.VerifySeal(proof2.SealVerifyInfo{ - SealProof: sn.ProofType, - SectorID: sn.ID, - DealIDs: nil, - Randomness: ticket, - InteractiveRandomness: seed, - Proof: proof, - SealedCID: sealed, - UnsealedCID: unsealed, - }) - if err != nil { - return nil, xerrors.Errorf("failed to verify proof: %w", err) - } - if !ok { - return nil, xerrors.Errorf("porep failed to validate") - } - - return proof, nil -} - -func (sb *SealCalls) makePhase1Out(unsCid cid.Cid, spt abi.RegisteredSealProof) ([]byte, error) { - commd, err := commcid.CIDToDataCommitmentV1(unsCid) - if err != nil { - return nil, xerrors.Errorf("make uns cid: %w", err) - } - - type Config struct { - ID string `json:"id"` - Path string `json:"path"` - RowsToDiscard int `json:"rows_to_discard"` - Size int `json:"size"` - } - - type Labels struct { - H *string `json:"_h"` // proofs want this.. 
- Labels []Config `json:"labels"` - } - - var phase1Output struct { - CommD [32]byte `json:"comm_d"` - Config Config `json:"config"` // TreeD - Labels map[string]*Labels `json:"labels"` - RegisteredProof string `json:"registered_proof"` - } - - copy(phase1Output.CommD[:], commd) - - phase1Output.Config.ID = "tree-d" - phase1Output.Config.Path = "/placeholder" - phase1Output.Labels = map[string]*Labels{} - - switch spt { - case abi.RegisteredSealProof_StackedDrg2KiBV1_1, abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 127 - phase1Output.Labels["StackedDrg2KiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg2KiBV1_1" - - for i := 0; i < 2; i++ { - phase1Output.Labels["StackedDrg2KiBV1"].Labels = append(phase1Output.Labels["StackedDrg2KiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 64, - }) - } - - case abi.RegisteredSealProof_StackedDrg8MiBV1_1, abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 524287 - phase1Output.Labels["StackedDrg8MiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg8MiBV1_1" - - for i := 0; i < 2; i++ { - phase1Output.Labels["StackedDrg8MiBV1"].Labels = append(phase1Output.Labels["StackedDrg8MiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 262144, - }) - } - - case abi.RegisteredSealProof_StackedDrg512MiBV1_1: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 33554431 - phase1Output.Labels["StackedDrg512MiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg512MiBV1_1" - - for i := 0; i < 2; i++ { - phase1Output.Labels["StackedDrg512MiBV1"].Labels = append(phase1Output.Labels["StackedDrg512MiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "placeholder", - RowsToDiscard: 0, - Size: 
16777216, - }) - } - - case abi.RegisteredSealProof_StackedDrg32GiBV1_1: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 2147483647 - phase1Output.Labels["StackedDrg32GiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg32GiBV1_1" - - for i := 0; i < 11; i++ { - phase1Output.Labels["StackedDrg32GiBV1"].Labels = append(phase1Output.Labels["StackedDrg32GiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 1073741824, - }) - } - - case abi.RegisteredSealProof_StackedDrg64GiBV1_1: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 4294967295 - phase1Output.Labels["StackedDrg64GiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg64GiBV1_1" - - for i := 0; i < 11; i++ { - phase1Output.Labels["StackedDrg64GiBV1"].Labels = append(phase1Output.Labels["StackedDrg64GiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 2147483648, - }) - } - - default: - panic("proof type not handled") - } - - return json.Marshal(phase1Output) -} - -func (sb *SealCalls) LocalStorage(ctx context.Context) ([]storiface.StoragePath, error) { - return sb.sectors.localStore.Local(ctx) -} - -func (sb *SealCalls) FinalizeSector(ctx context.Context, sector storiface.SectorRef, keepUnsealed bool) error { - alloc := storiface.FTNone - if keepUnsealed { - // note: In Curio we don't write the unsealed file in any of the previous stages, it's only written here from tree-d - alloc = storiface.FTUnsealed - } - - sectorPaths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, nil, sector, storiface.FTCache, alloc, storiface.PathSealing) - if err != nil { - return xerrors.Errorf("acquiring sector paths: %w", err) - } - defer releaseSector() - - ssize, err := sector.ProofType.SectorSize() - if err != nil { - return xerrors.Errorf("getting sector size: %w", err) - } - - if keepUnsealed { - // tree-d contains exactly unsealed data 
in the prefix, so - // * we move it to a temp file - // * we truncate the temp file to the sector size - // * we move the temp file to the unsealed location - - // temp path in cache where we'll move tree-d before truncating - // it is in the cache directory so that we can use os.Rename to move it - // to unsealed (which may be on a different filesystem) - tempUnsealed := filepath.Join(sectorPaths.Cache, storiface.SectorName(sector.ID)) - - _, terr := os.Stat(tempUnsealed) - tempUnsealedExists := terr == nil - - // First handle an edge case where we have already gone through this step, - // but ClearCache or later steps failed. In that case we'll see tree-d missing and unsealed present - - if _, err := os.Stat(filepath.Join(sectorPaths.Cache, proofpaths.TreeDName)); err != nil { - if os.IsNotExist(err) { - // check that unsealed exists and is the right size - st, err := os.Stat(sectorPaths.Unsealed) - if err != nil { - if os.IsNotExist(err) { - if tempUnsealedExists { - // unsealed file does not exist, but temp unsealed file does - // so we can just resume where the previous attempt left off - goto retryUnsealedMove - } - return xerrors.Errorf("neither unsealed file nor temp-unsealed file exists") - } - return xerrors.Errorf("stat unsealed file: %w", err) - } - if st.Size() != int64(ssize) { - if tempUnsealedExists { - // unsealed file exists but is the wrong size, and temp unsealed file exists - // so we can just resume where the previous attempt left off with some cleanup - - if err := os.Remove(sectorPaths.Unsealed); err != nil { - return xerrors.Errorf("removing unsealed file from last attempt: %w", err) - } - - goto retryUnsealedMove - } - return xerrors.Errorf("unsealed file is not the right size: %d != %d and temp unsealed is missing", st.Size(), ssize) - } - - // all good, just log that this edge case happened - log.Warnw("unsealed file exists but tree-d is missing, skipping move", "sector", sector.ID, "unsealed", sectorPaths.Unsealed, "cache", 
sectorPaths.Cache) - goto afterUnsealedMove - } - return xerrors.Errorf("stat tree-d file: %w", err) - } - - // If the state in clean do the move - - // move tree-d to temp file - if err := os.Rename(filepath.Join(sectorPaths.Cache, proofpaths.TreeDName), tempUnsealed); err != nil { - return xerrors.Errorf("moving tree-d to temp file: %w", err) - } - - retryUnsealedMove: - - // truncate sealed file to sector size - if err := os.Truncate(tempUnsealed, int64(ssize)); err != nil { - return xerrors.Errorf("truncating unsealed file to sector size: %w", err) - } - - // move temp file to unsealed location - if err := paths.Move(tempUnsealed, sectorPaths.Unsealed); err != nil { - return xerrors.Errorf("move temp unsealed sector to final location (%s -> %s): %w", tempUnsealed, sectorPaths.Unsealed, err) - } - } - -afterUnsealedMove: - if err := ffi.ClearCache(uint64(ssize), sectorPaths.Cache); err != nil { - return xerrors.Errorf("clearing cache: %w", err) - } - - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache|alloc); err != nil { - return xerrors.Errorf("ensure one copy: %w", err) - } - - return nil -} - -func (sb *SealCalls) MoveStorage(ctx context.Context, sector storiface.SectorRef, taskID *harmonytask.TaskID) error { - // only move the unsealed file if it still exists and needs moving - moveUnsealed := storiface.FTUnsealed - { - found, unsealedPathType, err := sb.sectorStorageType(ctx, sector, storiface.FTUnsealed) - if err != nil { - return xerrors.Errorf("checking cache storage type: %w", err) - } - - if !found || unsealedPathType == storiface.PathStorage { - moveUnsealed = storiface.FTNone - } - } - - toMove := storiface.FTCache | storiface.FTSealed | moveUnsealed - - var opts []storiface.AcquireOption - if taskID != nil { - resv, ok := sb.sectors.storageReservations.Load(*taskID) - // if the reservation is missing MoveStorage will simply create one internally. 
This is fine as the reservation - // will only be missing when the node is restarting, which means that the missing reservations will get recreated - // anyways, and before we start claiming other tasks. - if ok { - defer resv.Release() - - if resv.Alloc != storiface.FTNone { - return xerrors.Errorf("task %d has storage reservation with alloc", taskID) - } - if resv.Existing != toMove|storiface.FTUnsealed { - return xerrors.Errorf("task %d has storage reservation with different existing", taskID) - } - - opts = append(opts, storiface.AcquireInto(storiface.PathsWithIDs{Paths: resv.Paths, IDs: resv.PathIDs})) - } - } - - err := sb.sectors.storage.MoveStorage(ctx, sector, toMove, opts...) - if err != nil { - return xerrors.Errorf("moving storage: %w", err) - } - - for _, fileType := range toMove.AllSet() { - if err := sb.sectors.storage.RemoveCopies(ctx, sector.ID, fileType); err != nil { - return xerrors.Errorf("rm copies (t:%s, s:%v): %w", fileType, sector, err) - } - } - - return nil -} - -func (sb *SealCalls) sectorStorageType(ctx context.Context, sector storiface.SectorRef, ft storiface.SectorFileType) (sectorFound bool, ptype storiface.PathType, err error) { - stores, err := sb.sectors.sindex.StorageFindSector(ctx, sector.ID, ft, 0, false) - if err != nil { - return false, "", xerrors.Errorf("finding sector: %w", err) - } - if len(stores) == 0 { - return false, "", nil - } - - for _, store := range stores { - if store.CanSeal { - return true, storiface.PathSealing, nil - } - } - - return true, storiface.PathStorage, nil -} diff --git a/curiosrc/ffi/task_storage.go b/curiosrc/ffi/task_storage.go deleted file mode 100644 index f01a472fa8c..00000000000 --- a/curiosrc/ffi/task_storage.go +++ /dev/null @@ -1,231 +0,0 @@ -package ffi - -import ( - "context" - "sync" - "time" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - 
"github.com/filecoin-project/lotus/lib/harmony/resources" - storagePaths "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type SectorRef struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` -} - -func (sr SectorRef) ID() abi.SectorID { - return abi.SectorID{ - Miner: abi.ActorID(sr.SpID), - Number: abi.SectorNumber(sr.SectorNumber), - } -} - -func (sr SectorRef) Ref() storiface.SectorRef { - return storiface.SectorRef{ - ID: sr.ID(), - ProofType: sr.RegSealProof, - } -} - -type TaskStorage struct { - sc *SealCalls - - alloc, existing storiface.SectorFileType - ssize abi.SectorSize - pathType storiface.PathType - - taskToSectorRef func(taskID harmonytask.TaskID) (SectorRef, error) -} - -type ReleaseStorageFunc func() // free storage reservation - -type StorageReservation struct { - SectorRef SectorRef - Release ReleaseStorageFunc - Paths storiface.SectorPaths - PathIDs storiface.SectorPaths - - Alloc, Existing storiface.SectorFileType -} - -func (sb *SealCalls) Storage(taskToSectorRef func(taskID harmonytask.TaskID) (SectorRef, error), alloc, existing storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) *TaskStorage { - return &TaskStorage{ - sc: sb, - alloc: alloc, - existing: existing, - ssize: ssize, - pathType: pathType, - taskToSectorRef: taskToSectorRef, - } -} - -func (t *TaskStorage) HasCapacity() bool { - ctx := context.Background() - - paths, err := t.sc.sectors.sindex.StorageBestAlloc(ctx, t.alloc, t.ssize, t.pathType, storagePaths.NoMinerFilter) - if err != nil { - log.Errorf("finding best alloc in HasCapacity: %+v", err) - return false - } - - local, err := t.sc.sectors.localStore.Local(ctx) - if err != nil { - log.Errorf("getting local storage: %+v", err) - return false - } - - for _, path := range paths { - if t.pathType == storiface.PathStorage && !path.CanStore { - continue 
// we want to store, and this isn't a store path - } - if t.pathType == storiface.PathSealing && !path.CanSeal { - continue // we want to seal, and this isn't a seal path - } - - // check if this path is on this node - var found bool - for _, storagePath := range local { - if storagePath.ID == path.ID { - found = true - break - } - } - if !found { - // this path isn't on this node - continue - } - - // StorageBestAlloc already checks that there is enough space; Not atomic like reserving space, but it's - // good enough for HasCapacity - return true - } - - return false // no path found -} - -func (t *TaskStorage) Claim(taskID int) error { - // TaskStorage Claim Attempts to reserve storage for the task - // A: Create a reservation for files to be allocated - // B: Create a reservation for existing files to be fetched into local storage - // C: Create a reservation for existing files in local storage which may be extended (e.g. sector cache when computing Trees) - - ctx := context.Background() - - sectorRef, err := t.taskToSectorRef(harmonytask.TaskID(taskID)) - if err != nil { - return xerrors.Errorf("getting sector ref: %w", err) - } - - // storage writelock sector - lkctx, cancel := context.WithCancel(ctx) - - requestedTypes := t.alloc | t.existing - - lockAcquireTimuout := time.Second * 10 - lockAcquireTimer := time.NewTimer(lockAcquireTimuout) - - go func() { - defer cancel() - - select { - case <-lockAcquireTimer.C: - case <-ctx.Done(): - } - }() - - if err := t.sc.sectors.sindex.StorageLock(lkctx, sectorRef.ID(), storiface.FTNone, requestedTypes); err != nil { - // timer will expire - return xerrors.Errorf("claim StorageLock: %w", err) - } - - if !lockAcquireTimer.Stop() { - // timer expired, so lkctx is done, and that means the lock was acquired and dropped.. - return xerrors.Errorf("failed to acquire lock") - } - defer func() { - // make sure we release the sector lock - lockAcquireTimer.Reset(0) - }() - - // First see what we have locally. 
We are putting allocate and existing together because local acquire will look - // for existing files for allocate requests, separately existing files which aren't found locally will be need to - // be fetched, so we will need to create reservations for that too. - // NOTE localStore.AcquireSector does not open or create any files, nor does it reserve space. It only proposes - // paths to be used. - pathsFs, pathIDs, err := t.sc.sectors.localStore.AcquireSector(ctx, sectorRef.Ref(), storiface.FTNone, requestedTypes, t.pathType, storiface.AcquireMove) - if err != nil { - return err - } - - // reserve the space - release, err := t.sc.sectors.localStore.Reserve(ctx, sectorRef.Ref(), requestedTypes, pathIDs, storiface.FSOverheadSeal) - if err != nil { - return err - } - - var releaseOnce sync.Once - releaseFunc := func() { - releaseOnce.Do(release) - } - - sres := &StorageReservation{ - SectorRef: sectorRef, - Release: releaseFunc, - Paths: pathsFs, - PathIDs: pathIDs, - - Alloc: t.alloc, - Existing: t.existing, - } - - t.sc.sectors.storageReservations.Store(harmonytask.TaskID(taskID), sres) - - log.Debugw("claimed storage", "task_id", taskID, "sector", sectorRef.ID(), "paths", pathsFs) - - // note: we drop the sector writelock on return; THAT IS INTENTIONAL, this code runs in CanAccept, which doesn't - // guarantee that the work for this sector will happen on this node; SDR CanAccept just ensures that the node can - // run the job, harmonytask is what ensures that only one SDR runs at a time - return nil -} - -func (t *TaskStorage) MarkComplete(taskID int) error { - // MarkComplete is ALWAYS called after the task is done or not scheduled - // If Claim is called and returns without errors, MarkComplete with the same - // taskID is guaranteed to eventually be called - - sectorRef, err := t.taskToSectorRef(harmonytask.TaskID(taskID)) - if err != nil { - return xerrors.Errorf("getting sector ref: %w", err) - } - - sres, ok := 
t.sc.sectors.storageReservations.Load(harmonytask.TaskID(taskID)) - if !ok { - return xerrors.Errorf("no reservation found for task %d", taskID) - } - - if sectorRef != sres.SectorRef { - return xerrors.Errorf("reservation sector ref doesn't match task sector ref: %+v != %+v", sectorRef, sres.SectorRef) - } - - log.Debugw("marking storage complete", "task_id", taskID, "sector", sectorRef.ID(), "paths", sres.Paths) - - // remove the reservation - t.sc.sectors.storageReservations.Delete(harmonytask.TaskID(taskID)) - - // release the reservation - sres.Release() - - // note: this only frees the reservation, allocated sectors are declared in AcquireSector which is aware of - // the reservation - return nil -} - -var _ resources.Storage = &TaskStorage{} diff --git a/curiosrc/gc/storage_endpoint_gc.go b/curiosrc/gc/storage_endpoint_gc.go deleted file mode 100644 index 45783f35367..00000000000 --- a/curiosrc/gc/storage_endpoint_gc.go +++ /dev/null @@ -1,287 +0,0 @@ -package gc - -import ( - "context" - "strings" - "sync" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/result" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("curiogc") - -const StorageEndpointGCInterval = 21 * time.Minute -const StorageEndpointDeadTime = StorageEndpointGCInterval * 6 // ~2h -const MaxParallelEndpointChecks = 32 - -type StorageEndpointGC struct { - si *paths.DBIndex - remote *paths.Remote - db *harmonydb.DB -} - -func NewStorageEndpointGC(si *paths.DBIndex, remote *paths.Remote, db *harmonydb.DB) *StorageEndpointGC { - return &StorageEndpointGC{ - si: si, - remote: 
remote, - db: db, - } -} - -func (s *StorageEndpointGC) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - /* - 1. Get all storage paths + urls (endpoints) - 2. Ping each url, record results - 3. Update sector_path_url_liveness with success/failure - 4.1 If a URL was consistently down for StorageEndpointDeadTime, remove it from the storage_path table - 4.2 Remove storage paths with no URLs remaining - 4.2.1 in the same transaction remove sector refs to the dead path - */ - - ctx := context.Background() - - var pathRefs []struct { - StorageID storiface.ID `db:"storage_id"` - Urls string `db:"urls"` - LastHeartbeat *time.Time `db:"last_heartbeat"` - } - - err = s.db.Select(ctx, &pathRefs, `SELECT storage_id, urls, last_heartbeat FROM storage_path`) - if err != nil { - return false, xerrors.Errorf("getting path metadata: %w", err) - } - - type pingResult struct { - storageID storiface.ID - url string - - res result.Result[fsutil.FsStat] - } - - var pingResults []pingResult - var resultLk sync.Mutex - var resultThrottle = make(chan struct{}, MaxParallelEndpointChecks) - - for _, pathRef := range pathRefs { - pathRef := pathRef - urls := strings.Split(pathRef.Urls, paths.URLSeparator) - - for _, url := range urls { - url := url - - select { - case resultThrottle <- struct{}{}: - case <-ctx.Done(): - return false, ctx.Err() - } - - go func() { - defer func() { - <-resultThrottle - }() - - st, err := s.remote.StatUrl(ctx, url, pathRef.StorageID) - - res := pingResult{ - storageID: pathRef.StorageID, - url: url, - res: result.Wrap(st, err), - } - - resultLk.Lock() - pingResults = append(pingResults, res) - resultLk.Unlock() - }() - } - } - - // Wait for all pings to finish - for i := 0; i < MaxParallelEndpointChecks; i++ { - select { - case resultThrottle <- struct{}{}: - case <-ctx.Done(): - return false, ctx.Err() - } - } - - // Update the liveness table - - /* - create table sector_path_url_liveness ( - storage_id text, - url text, - - 
last_checked timestamp not null, - last_live timestamp, - last_dead timestamp, - last_dead_reason text, - - primary key (storage_id, url), - - foreign key (storage_id) references storage_path (storage_id) on delete cascade - ) - */ - - currentTime := time.Now().UTC() - - committed, err := s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - for _, pingResult := range pingResults { - var lastLive, lastDead, lastDeadReason interface{} - if pingResult.res.Error == nil { - lastLive = currentTime.UTC() - lastDead = nil - lastDeadReason = nil - } else { - lastLive = nil - lastDead = currentTime.UTC() - lastDeadReason = pingResult.res.Error.Error() - } - - // This function updates the liveness data for a URL in the `sector_path_url_liveness` table. - // - // On conflict, where the same `storage_id` and `url` are found: - // - last_checked is always updated to the current timestamp. - // - last_live is updated to the new `last_live` if it is not null; otherwise, it retains the existing value. - // - last_dead is conditionally updated based on two criteria: - // 1. It is set to the new `last_dead` if the existing `last_dead` is null (indicating this is the first recorded failure). - // 2. It is updated to the new `last_dead` if there has been a live instance recorded after the most recent dead timestamp, indicating the resource was alive again before this new failure. - // 3. It retains the existing value if none of the above conditions are met. - // - last_dead_reason is updated similarly to `last_live`, using COALESCE to prefer the new reason if it's provided. 
- _, err := tx.Exec(` - INSERT INTO sector_path_url_liveness (storage_id, url, last_checked, last_live, last_dead, last_dead_reason) - VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (storage_id, url) DO UPDATE - SET last_checked = EXCLUDED.last_checked, - last_live = COALESCE(EXCLUDED.last_live, sector_path_url_liveness.last_live), - last_dead = CASE - WHEN sector_path_url_liveness.last_dead IS NULL THEN EXCLUDED.last_dead - WHEN sector_path_url_liveness.last_dead IS NOT NULL AND sector_path_url_liveness.last_live > sector_path_url_liveness.last_dead THEN EXCLUDED.last_dead - ELSE sector_path_url_liveness.last_dead - END, - last_dead_reason = COALESCE(EXCLUDED.last_dead_reason, sector_path_url_liveness.last_dead_reason) - `, pingResult.storageID, pingResult.url, currentTime, lastLive, lastDead, lastDeadReason) - if err != nil { - return false, xerrors.Errorf("updating liveness data: %w", err) - } - } - - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return false, xerrors.Errorf("sector_path_url_liveness update: %w", err) - } - if !committed { - return false, xerrors.Errorf("sector_path_url_liveness update: transaction didn't commit") - } - - /////// - // Now we do the actual database cleanup - if !stillOwned() { - return false, xerrors.Errorf("task no longer owned") - } - - committed, err = s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - // Identify URLs that are consistently down - var deadURLs []struct { - StorageID storiface.ID - URL string - } - err = tx.Select(&deadURLs, ` - SELECT storage_id, url FROM sector_path_url_liveness - WHERE last_dead > COALESCE(last_live, '1970-01-01') AND last_dead < $1 - `, currentTime.Add(-StorageEndpointDeadTime).UTC()) - if err != nil { - return false, xerrors.Errorf("selecting dead URLs: %w", err) - } - - log.Debugw("dead urls", "dead_urls", deadURLs) - - // Remove dead URLs from storage_path entries and handle path cleanup - for _, du := range deadURLs { - // Fetch the current URLs for 
the storage path - var URLs string - err = tx.QueryRow("SELECT urls FROM storage_path WHERE storage_id = $1", du.StorageID).Scan(&URLs) - if err != nil { - return false, xerrors.Errorf("fetching storage paths: %w", err) - } - - // Filter out the dead URL using lo.Reject and prepare the updated list - urls := strings.Split(URLs, paths.URLSeparator) - urls = lo.Reject(urls, func(u string, _ int) bool { - return u == du.URL - }) - - log.Debugw("filtered urls", "urls", urls, "dead_url", du.URL, "storage_id", du.StorageID) - - if len(urls) == 0 { - // If no URLs left, remove the storage path entirely - _, err = tx.Exec("DELETE FROM storage_path WHERE storage_id = $1", du.StorageID) - if err != nil { - return false, xerrors.Errorf("deleting storage path: %w", err) - } - _, err = tx.Exec("DELETE FROM sector_location WHERE storage_id = $1", du.StorageID) - if err != nil { - return false, xerrors.Errorf("deleting sector locations: %w", err) - } - } else { - // Update the storage path with the filtered URLs - newURLs := strings.Join(urls, paths.URLSeparator) - _, err = tx.Exec("UPDATE storage_path SET urls = $1 WHERE storage_id = $2", newURLs, du.StorageID) - if err != nil { - return false, xerrors.Errorf("updating storage path urls: %w", err) - } - // Remove sector_path_url_liveness entry - _, err = tx.Exec("DELETE FROM sector_path_url_liveness WHERE storage_id = $1 AND url = $2", du.StorageID, du.URL) - if err != nil { - return false, xerrors.Errorf("deleting sector_path_url_liveness entry: %w", err) - } - } - } - - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return false, xerrors.Errorf("removing dead URLs and cleaning storage paths: %w", err) - } - if !committed { - return false, xerrors.Errorf("transaction for removing dead URLs and cleaning paths did not commit") - } - - return true, nil -} - -func (s *StorageEndpointGC) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, 
nil -} - -func (s *StorageEndpointGC) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 1, - Name: "StorageMetaGC", - Cost: resources.Resources{ - Cpu: 1, - Ram: 64 << 20, - Gpu: 0, - }, - IAmBored: harmonytask.SingletonTaskAdder(StorageEndpointGCInterval, s), - } -} - -func (s *StorageEndpointGC) Adder(taskFunc harmonytask.AddTaskFunc) { - // lazy endpoint, added when bored - return -} - -var _ harmonytask.TaskInterface = &StorageEndpointGC{} diff --git a/curiosrc/market/deal_ingest.go b/curiosrc/market/deal_ingest.go deleted file mode 100644 index ea382717acc..00000000000 --- a/curiosrc/market/deal_ingest.go +++ /dev/null @@ -1,137 +0,0 @@ -package market - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -type Ingester interface { - AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) -} - -type PieceIngesterApi interface { - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateMinerAllocated(ctx context.Context, a address.Address, key types.TipSetKey) (*bitfield.BitField, error) - StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) -} - -type PieceIngester struct { - db *harmonydb.DB - api PieceIngesterApi -} - -func NewPieceIngester(db *harmonydb.DB, api PieceIngesterApi) 
*PieceIngester { - return &PieceIngester{db: db, api: api} -} - -func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - mi, err := p.api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return api.SectorOffset{}, err - } - - if piece.DealProposal.PieceSize != abi.PaddedPieceSize(mi.SectorSize) { - return api.SectorOffset{}, xerrors.Errorf("only full sector pieces supported for now") - } - - // check raw size - if piece.DealProposal.PieceSize != padreader.PaddedSize(uint64(rawSize)).Padded() { - return api.SectorOffset{}, xerrors.Errorf("raw size doesn't match padded piece size") - } - - // add initial piece + to a sector - nv, err := p.api.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting network version: %w", err) - } - - synth := false // todo synthetic porep config - spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType, synth) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting seal proof type: %w", err) - } - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting miner ID: %w", err) - } - - num, err := seal.AllocateSectorNumbers(ctx, p.api, p.db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - if len(numbers) != 1 { - return false, xerrors.Errorf("expected one sector number") - } - n := numbers[0] - - _, err := tx.Exec("INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3)", mid, n, spt) - if err != nil { - return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err) - } - - dataHdrJson, err := json.Marshal(header) - if err != nil { - return false, xerrors.Errorf("json.Marshal(header): %w", err) - } - - dealProposalJson, err := 
json.Marshal(piece.DealProposal) - if err != nil { - return false, xerrors.Errorf("json.Marshal(piece.DealProposal): %w", err) - } - - _, err = tx.Exec(`INSERT INTO sectors_sdr_initial_pieces (sp_id, - sector_number, - piece_index, - - piece_cid, - piece_size, - - data_url, - data_headers, - data_raw_size, - data_delete_on_finalize, - - f05_publish_cid, - f05_deal_id, - f05_deal_proposal, - f05_deal_start_epoch, - f05_deal_end_epoch) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, - mid, n, 0, - piece.DealProposal.PieceCID, piece.DealProposal.PieceSize, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.PublishCid, piece.DealID, dealProposalJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch) - if err != nil { - return false, xerrors.Errorf("inserting into sectors_sdr_initial_pieces: %w", err) - } - - return true, nil - }) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("allocating sector numbers: %w", err) - } - - if len(num) != 1 { - return api.SectorOffset{}, xerrors.Errorf("expected one sector number") - } - - // After we insert the piece/sector_pipeline entries, the lpseal/poller will take it from here - - return api.SectorOffset{ - Sector: num[0], - Offset: 0, - }, nil -} diff --git a/curiosrc/market/fakelm/iface.go b/curiosrc/market/fakelm/iface.go deleted file mode 100644 index 1bc91b35e75..00000000000 --- a/curiosrc/market/fakelm/iface.go +++ /dev/null @@ -1,33 +0,0 @@ -package fakelm - -import ( - "context" - - "github.com/google/uuid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -// MinimalLMApi is a subset of the LotusMiner API that is exposed by Curio -// for consumption by boost -type MinimalLMApi interface { - ActorAddress(context.Context) (address.Address, error) - - WorkerJobs(context.Context) 
(map[uuid.UUID][]storiface.WorkerJob, error) - - SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) - - SectorsList(context.Context) ([]abi.SectorNumber, error) - SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) - - SectorsListInStates(context.Context, []api.SectorState) ([]abi.SectorNumber, error) - - StorageRedeclareLocal(context.Context, *storiface.ID, bool) error - - ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) -} diff --git a/curiosrc/market/fakelm/lmimpl.go b/curiosrc/market/fakelm/lmimpl.go deleted file mode 100644 index 9dc19e627d8..00000000000 --- a/curiosrc/market/fakelm/lmimpl.go +++ /dev/null @@ -1,348 +0,0 @@ -package fakelm - -import ( - "context" - "encoding/base64" - "net/http" - "net/url" - - "github.com/gbrlsnchs/jwt/v3" - "github.com/google/uuid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/market" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/paths" - sealing "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type LMRPCProvider struct { - si paths.SectorIndex - full api.FullNode - - maddr address.Address // lotus-miner RPC is single-actor - minerID abi.ActorID - - 
ssize abi.SectorSize - - pi market.Ingester - db *harmonydb.DB - conf *config.CurioConfig -} - -func NewLMRPCProvider(si paths.SectorIndex, full api.FullNode, maddr address.Address, minerID abi.ActorID, ssize abi.SectorSize, pi market.Ingester, db *harmonydb.DB, conf *config.CurioConfig) *LMRPCProvider { - return &LMRPCProvider{ - si: si, - full: full, - maddr: maddr, - minerID: minerID, - ssize: ssize, - pi: pi, - db: db, - conf: conf, - } -} - -func (l *LMRPCProvider) ActorAddress(ctx context.Context) (address.Address, error) { - return l.maddr, nil -} - -func (l *LMRPCProvider) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { - // correct enough - return map[uuid.UUID][]storiface.WorkerJob{}, nil -} - -func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { - si, err := l.si.StorageFindSector(ctx, abi.SectorID{Miner: l.minerID, Number: sid}, storiface.FTSealed|storiface.FTCache, 0, false) - if err != nil { - return api.SectorInfo{}, err - } - - var ssip []struct { - PieceCID *string `db:"piece_cid"` - DealID *int64 `db:"f05_deal_id"` - } - - err = l.db.Select(ctx, &ssip, "SELECT ssip.piece_cid, ssip.f05_deal_id FROM sectors_sdr_pipeline p LEFT JOIN sectors_sdr_initial_pieces ssip ON p.sp_id = ssip.sp_id AND p.sector_number = ssip.sector_number WHERE p.sp_id = $1 AND p.sector_number = $2", l.minerID, sid) - if err != nil { - return api.SectorInfo{}, err - } - - var deals []abi.DealID - if len(ssip) > 0 { - for _, d := range ssip { - if d.DealID != nil { - deals = append(deals, abi.DealID(*d.DealID)) - } - } - } else { - osi, err := l.full.StateSectorGetInfo(ctx, l.maddr, sid, types.EmptyTSK) - if err != nil { - return api.SectorInfo{}, err - } - - if osi != nil { - deals = osi.DealIDs - } - } - - spt, err := miner.SealProofTypeFromSectorSize(l.ssize, network.Version20, false) // good enough, just need this for ssize anyways - if err != nil { - return 
api.SectorInfo{}, err - } - - if len(si) == 0 { - state := api.SectorState(sealing.UndefinedSectorState) - if len(ssip) > 0 { - state = api.SectorState(sealing.PreCommit1) - } - - return api.SectorInfo{ - SectorID: sid, - State: state, - CommD: nil, - CommR: nil, - Proof: nil, - Deals: deals, - Pieces: nil, - Ticket: api.SealTicket{}, - Seed: api.SealSeed{}, - PreCommitMsg: nil, - CommitMsg: nil, - Retries: 0, - ToUpgrade: false, - ReplicaUpdateMessage: nil, - LastErr: "", - Log: nil, - SealProof: spt, - Activation: 0, - Expiration: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - InitialPledge: big.Zero(), - OnTime: 0, - Early: 0, - }, nil - } - - var state = api.SectorState(sealing.Proving) - if !si[0].CanStore { - state = api.SectorState(sealing.PreCommit2) - } - - // todo improve this with on-chain info - return api.SectorInfo{ - SectorID: sid, - State: state, - CommD: nil, - CommR: nil, - Proof: nil, - Deals: deals, - Pieces: nil, - Ticket: api.SealTicket{}, - Seed: api.SealSeed{}, - PreCommitMsg: nil, - CommitMsg: nil, - Retries: 0, - ToUpgrade: false, - ReplicaUpdateMessage: nil, - LastErr: "", - Log: nil, - - SealProof: spt, - Activation: 0, - Expiration: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - InitialPledge: big.Zero(), - OnTime: 0, - Early: 0, - }, nil -} - -func (l *LMRPCProvider) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - var out []abi.SectorNumber - for _, decl := range decls { - for _, s := range decl { - if s.Miner != l.minerID { - continue - } - - out = append(out, s.SectorID.Number) - } - } - - return out, nil -} - -type sectorParts struct { - sealed, unsealed, cache bool - inStorage bool -} - -func (l *LMRPCProvider) SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - states := map[abi.SectorID]sectorParts{} 
- for si, decll := range decls { - sinfo, err := l.si.StorageInfo(ctx, si) - if err != nil { - return nil, err - } - - for _, decl := range decll { - if decl.Miner != l.minerID { - continue - } - - state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] - state.sealed = state.sealed || decl.Has(storiface.FTSealed) - state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed) - state.cache = state.cache || decl.Has(storiface.FTCache) - state.inStorage = state.inStorage || sinfo.CanStore - states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state - } - } - - out := map[api.SectorState]int{} - for _, state := range states { - switch { - case state.sealed && state.inStorage: - out[api.SectorState(sealing.Proving)]++ - default: - // not even close to correct, but good enough for now - out[api.SectorState(sealing.PreCommit1)]++ - } - } - - return out, nil -} - -func (l *LMRPCProvider) SectorsListInStates(ctx context.Context, want []api.SectorState) ([]abi.SectorNumber, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - wantProving, wantPrecommit1 := false, false - for _, s := range want { - switch s { - case api.SectorState(sealing.Proving): - wantProving = true - case api.SectorState(sealing.PreCommit1): - wantPrecommit1 = true - } - } - - states := map[abi.SectorID]sectorParts{} - - for si, decll := range decls { - sinfo, err := l.si.StorageInfo(ctx, si) - if err != nil { - return nil, err - } - - for _, decl := range decll { - if decl.Miner != l.minerID { - continue - } - - state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] - state.sealed = state.sealed || decl.Has(storiface.FTSealed) - state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed) - state.cache = state.cache || decl.Has(storiface.FTCache) - state.inStorage = state.inStorage || sinfo.CanStore - states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state - } - } - 
var out []abi.SectorNumber - - for id, state := range states { - switch { - case state.sealed && state.inStorage: - if wantProving { - out = append(out, id.Number) - } - default: - // not even close to correct, but good enough for now - if wantPrecommit1 { - out = append(out, id.Number) - } - } - } - - return out, nil -} - -func (l *LMRPCProvider) StorageRedeclareLocal(ctx context.Context, id *storiface.ID, b bool) error { - // so this rescans and redeclares sectors on lotus-miner; whyyy is boost even calling this? - - return nil -} - -func (l *LMRPCProvider) IsUnsealed(ctx context.Context, sectorNum abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - sectorID := abi.SectorID{Miner: l.minerID, Number: sectorNum} - - si, err := l.si.StorageFindSector(ctx, sectorID, storiface.FTUnsealed, 0, false) - if err != nil { - return false, err - } - - // yes, yes, technically sectors can be partially unsealed, but that is never done in practice - // and can't even be easily done with the current implementation - return len(si) > 0, nil -} - -func (l *LMRPCProvider) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) { - return abi.PieceInfo{}, xerrors.Errorf("not supported") -} - -func (l *LMRPCProvider) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) { - if d.DealProposal.PieceSize != abi.PaddedPieceSize(l.ssize) { - return api.SectorOffset{}, xerrors.Errorf("only full-sector pieces are supported") - } - - return api.SectorOffset{}, xerrors.Errorf("not supported, use AllocatePieceToSector") -} - -func (l *LMRPCProvider) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - return l.pi.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header) -} - -func (l 
*LMRPCProvider) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) { - type jwtPayload struct { - Allow []auth.Permission - } - - p := jwtPayload{ - Allow: perms, - } - - sk, err := base64.StdEncoding.DecodeString(l.conf.Apis.StorageRPCSecret) - if err != nil { - return nil, xerrors.Errorf("decode secret: %w", err) - } - - return jwt.Sign(&p, jwt.NewHS256(sk)) -} - -var _ MinimalLMApi = &LMRPCProvider{} diff --git a/curiosrc/market/lmrpc/lmrpc.go b/curiosrc/market/lmrpc/lmrpc.go deleted file mode 100644 index 7286cf3667d..00000000000 --- a/curiosrc/market/lmrpc/lmrpc.go +++ /dev/null @@ -1,499 +0,0 @@ -package lmrpc - -import ( - "context" - "fmt" - "io" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "github.com/google/uuid" - logging "github.com/ipfs/go-log/v2" - "github.com/jackc/pgx/v5" - manet "github.com/multiformats/go-multiaddr/net" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - cumarket "github.com/filecoin-project/lotus/curiosrc/market" - "github.com/filecoin-project/lotus/curiosrc/market/fakelm" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/nullreader" - "github.com/filecoin-project/lotus/metrics/proxy" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("lmrpc") - -const backpressureWaitTime = 30 * time.Second - -func ServeCurioMarketRPCFromConfig(db *harmonydb.DB, full api.FullNode, cfg *config.CurioConfig) error { - return forEachMarketRPC(cfg, func(maddr string, listen string) error { - addr, err := address.NewFromString(maddr) - if err != nil { - return 
xerrors.Errorf("parsing actor address: %w", err) - } - - go func() { - err := ServeCurioMarketRPC(db, full, addr, cfg, listen) - if err != nil { - log.Errorf("failed to serve market rpc: %s", err) - } - }() - - return nil - }) -} - -func MakeTokens(cfg *config.CurioConfig) (map[address.Address]string, error) { - out := map[address.Address]string{} - - err := forEachMarketRPC(cfg, func(smaddr string, listen string) error { - ctx := context.Background() - - laddr, err := net.ResolveTCPAddr("tcp", listen) - if err != nil { - return xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() { - return xerrors.Errorf("market rpc server listen address must be a specific address, not %s (probably missing bind IP)", listen) - } - - // need minimal provider with just the config - lp := fakelm.NewLMRPCProvider(nil, nil, address.Undef, 0, 0, nil, nil, cfg) - - tok, err := lp.AuthNew(ctx, api.AllPermissions) - if err != nil { - return err - } - - // parse listen into multiaddr - ma, err := manet.FromNetAddr(laddr) - if err != nil { - return xerrors.Errorf("net from addr (%v): %w", laddr, err) - } - - maddr, err := address.NewFromString(smaddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - token := fmt.Sprintf("%s:%s", tok, ma) - out[maddr] = token - - return nil - }) - - return out, err -} - -func forEachMarketRPC(cfg *config.CurioConfig, cb func(string, string) error) error { - for n, server := range cfg.Subsystems.BoostAdapters { - n := n - - // server: [f0.. actor address]:[bind address] - // bind address is either a numeric port or a full address - - // first split at first : to get the actor address and the bind address - split := strings.SplitN(server, ":", 2) - - // if the split length is not 2, return an error - if len(split) != 2 { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. 
actor address]:[bind address]", n, server) - } - - // get the actor address and the bind address - strMaddr, strListen := split[0], split[1] - - maddr, err := address.NewFromString(strMaddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - // check the listen address - if strListen == "" { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server) - } - // if listen address is numeric, prepend the default host - if _, err := strconv.Atoi(strListen); err == nil { - strListen = "0.0.0.0:" + strListen - } - // check if the listen address is a valid address - if _, _, err := net.SplitHostPort(strListen); err != nil { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server) - } - - log.Infow("Starting market RPC server", "actor", maddr, "listen", strListen) - - if err := cb(strMaddr, strListen); err != nil { - return err - } - } - - return nil -} - -func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Address, conf *config.CurioConfig, listen string) error { - ctx := context.Background() - - pin := cumarket.NewPieceIngester(db, full) - - si := paths.NewDBIndex(nil, db) - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner id: %w", err) - } - - mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - lp := fakelm.NewLMRPCProvider(si, full, maddr, abi.ActorID(mid), mi.SectorSize, pin, db, conf) - - laddr, err := net.ResolveTCPAddr("tcp", listen) - if err != nil { - return xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() { - return xerrors.Errorf("market rpc server listen address must be a specific address, not %s (probably missing bind IP)", listen) - } - rootUrl := url.URL{ - Scheme: "http", - Host: laddr.String(), - } - - ast 
:= api.StorageMinerStruct{} - - ast.CommonStruct.Internal.Version = func(ctx context.Context) (api.APIVersion, error) { - return api.APIVersion{ - Version: "curio-proxy-v0", - APIVersion: api.MinerAPIVersion0, - BlockDelay: build.BlockDelaySecs, - }, nil - } - - ast.CommonStruct.Internal.AuthNew = lp.AuthNew - - ast.Internal.ActorAddress = lp.ActorAddress - ast.Internal.WorkerJobs = lp.WorkerJobs - ast.Internal.SectorsStatus = lp.SectorsStatus - ast.Internal.SectorsList = lp.SectorsList - ast.Internal.SectorsSummary = lp.SectorsSummary - ast.Internal.SectorsListInStates = lp.SectorsListInStates - ast.Internal.StorageRedeclareLocal = lp.StorageRedeclareLocal - ast.Internal.ComputeDataCid = lp.ComputeDataCid - - type pieceInfo struct { - data storiface.Data - size abi.UnpaddedPieceSize - - done chan struct{} - } - - pieceInfoLk := new(sync.Mutex) - pieceInfos := map[uuid.UUID][]pieceInfo{} - - ast.Internal.SectorAddPieceToAny = func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal api.PieceDealInfo) (api.SectorOffset, error) { - origPieceData := pieceData - defer func() { - closer, ok := origPieceData.(io.Closer) - if !ok { - log.Warnf("DataCid: cannot close pieceData reader %T because it is not an io.Closer", origPieceData) - return - } - if err := closer.Close(); err != nil { - log.Warnw("closing pieceData in DataCid", "error", err) - } - }() - - pi := pieceInfo{ - data: pieceData, - size: pieceSize, - - done: make(chan struct{}), - } - - pieceUUID := uuid.New() - - //color.Blue("%s %s piece assign request with id %s", deal.DealProposal.PieceCID, deal.DealProposal.Provider, pieceUUID) - log.Infow("piece assign request", "piece_cid", deal.DealProposal.PieceCID, "provider", deal.DealProposal.Provider, "piece_uuid", pieceUUID) - - pieceInfoLk.Lock() - pieceInfos[pieceUUID] = append(pieceInfos[pieceUUID], pi) - pieceInfoLk.Unlock() - - // /piece?piece_cid=xxxx - dataUrl := rootUrl - dataUrl.Path = "/piece" - dataUrl.RawQuery = 
"piece_id=" + pieceUUID.String() - - // add piece entry - - var refID int64 - var pieceWasCreated bool - - for { - var backpressureWait bool - - comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - // BACKPRESSURE - wait, err := maybeApplyBackpressure(tx, conf.Ingest) - if err != nil { - return false, xerrors.Errorf("backpressure checks: %w", err) - } - if wait { - backpressureWait = true - return false, nil - } - - var pieceID int64 - // Attempt to select the piece ID first - err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, deal.DealProposal.PieceCID.String()).Scan(&pieceID) - - if err != nil { - if err == pgx.ErrNoRows { - // Piece does not exist, attempt to insert - err = tx.QueryRow(` - INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size) - VALUES ($1, $2, $3) - ON CONFLICT (piece_cid) DO NOTHING - RETURNING id`, deal.DealProposal.PieceCID.String(), int64(pieceSize.Padded()), int64(pieceSize)).Scan(&pieceID) - if err != nil { - return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err) - } - pieceWasCreated = true // New piece was created - } else { - // Some other error occurred during select - return false, xerrors.Errorf("checking existing parked piece: %w", err) - } - } else { - pieceWasCreated = false // Piece already exists, no new piece was created - } - - // Add parked_piece_ref - err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url) - VALUES ($1, $2) RETURNING ref_id`, pieceID, dataUrl.String()).Scan(&refID) - if err != nil { - return false, xerrors.Errorf("inserting parked piece ref: %w", err) - } - - // If everything went well, commit the transaction - return true, nil // This will commit the transaction - }, harmonydb.OptionRetry()) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("inserting parked piece: %w", err) - } - if !comm { - if backpressureWait { - // Backpressure was applied, wait and try again - select { - 
case <-time.After(backpressureWaitTime): - case <-ctx.Done(): - return api.SectorOffset{}, xerrors.Errorf("context done while waiting for backpressure: %w", ctx.Err()) - } - continue - } - - return api.SectorOffset{}, xerrors.Errorf("piece tx didn't commit") - } - - break - } - - // wait for piece to be parked - if pieceWasCreated { - <-pi.done - } else { - // If the piece was not created, we need to close the done channel - close(pi.done) - - go func() { - // close the data reader (drain to eof if it's not a closer) - if closer, ok := pieceData.(io.Closer); ok { - if err := closer.Close(); err != nil { - log.Warnw("closing pieceData in DataCid", "error", err) - } - } else { - log.Warnw("pieceData is not an io.Closer", "type", fmt.Sprintf("%T", pieceData)) - - _, err := io.Copy(io.Discard, pieceData) - if err != nil { - log.Warnw("draining pieceData in DataCid", "error", err) - } - } - }() - } - pieceIDUrl := url.URL{ - Scheme: "pieceref", - Opaque: fmt.Sprintf("%d", refID), - } - - // make a sector - so, err := pin.AllocatePieceToSector(ctx, maddr, deal, int64(pieceSize), pieceIDUrl, nil) - if err != nil { - return api.SectorOffset{}, err - } - - log.Infow("piece assigned to sector", "piece_cid", deal.DealProposal.PieceCID, "sector", so.Sector, "offset", so.Offset) - - return so, nil - } - - ast.Internal.StorageList = si.StorageList - ast.Internal.StorageDetach = si.StorageDetach - ast.Internal.StorageReportHealth = si.StorageReportHealth - ast.Internal.StorageDeclareSector = si.StorageDeclareSector - ast.Internal.StorageDropSector = si.StorageDropSector - ast.Internal.StorageFindSector = si.StorageFindSector - ast.Internal.StorageInfo = si.StorageInfo - ast.Internal.StorageBestAlloc = si.StorageBestAlloc - ast.Internal.StorageLock = si.StorageLock - ast.Internal.StorageTryLock = si.StorageTryLock - ast.Internal.StorageGetLocks = si.StorageGetLocks - - var pieceHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { - // /piece?piece_id=xxxx - 
pieceUUID := r.URL.Query().Get("piece_id") - - pu, err := uuid.Parse(pieceUUID) - if err != nil { - http.Error(w, "bad piece id", http.StatusBadRequest) - return - } - - if r.Method != http.MethodGet { - http.Error(w, "bad method", http.StatusMethodNotAllowed) - return - } - - fmt.Printf("%s request for piece from %s\n", pieceUUID, r.RemoteAddr) - - pieceInfoLk.Lock() - pis, ok := pieceInfos[pu] - if !ok { - http.Error(w, "piece not found", http.StatusNotFound) - log.Warnw("piece not found", "piece_uuid", pu) - pieceInfoLk.Unlock() - return - } - - // pop - pi := pis[0] - pis = pis[1:] - - pieceInfos[pu] = pis - if len(pis) == 0 { - delete(pieceInfos, pu) - } - - pieceInfoLk.Unlock() - - start := time.Now() - - pieceData := io.LimitReader(io.MultiReader( - pi.data, - nullreader.Reader{}, - ), int64(pi.size)) - - n, err := io.Copy(w, pieceData) - close(pi.done) - - took := time.Since(start) - mbps := float64(n) / (1024 * 1024) / took.Seconds() - - if err != nil { - log.Errorf("copying piece data: %s", err) - return - } - - log.Infow("piece served", "piece_uuid", pu, "size", float64(n)/(1024*1024), "duration", took, "speed", mbps) - } - - finalApi := proxy.LoggingAPI[api.StorageMiner, api.StorageMinerStruct](&ast) - - mh, err := node.MinerHandler(finalApi, false) // todo permissioned - if err != nil { - return err - } - - mux := http.NewServeMux() - mux.Handle("/piece", pieceHandler) - mux.Handle("/", mh) - - server := &http.Server{ - Addr: listen, - Handler: mux, - ReadTimeout: 48 * time.Hour, - WriteTimeout: 48 * time.Hour, // really high because we block until pieces are saved in PiecePark - } - - return server.ListenAndServe() -} - -func maybeApplyBackpressure(tx *harmonydb.Tx, cfg config.CurioIngestConfig) (wait bool, err error) { - var bufferedSDR, bufferedTrees, bufferedPoRep int - err = tx.QueryRow(`WITH BufferedSDR AS ( - SELECT SUM(buffered_count) AS buffered_sdr_count - FROM ( - SELECT COUNT(p.task_id_sdr) - COUNT(t.owner_id) AS buffered_count - FROM 
sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_sdr = t.id - WHERE p.after_sdr = false - UNION ALL - SELECT COUNT(1) AS buffered_count - FROM parked_pieces - WHERE complete = false - ) AS subquery -), - BufferedTrees AS ( - SELECT COUNT(p.task_id_tree_r) - COUNT(t.owner_id) AS buffered_trees_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_tree_r = t.id - WHERE p.after_sdr = true AND p.after_tree_r = false - ), - BufferedPoRep AS ( - SELECT COUNT(p.task_id_porep) - COUNT(t.owner_id) AS buffered_porep_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_porep = t.id - WHERE p.after_tree_r = true AND p.after_porep = false - ) -SELECT - (SELECT buffered_sdr_count FROM BufferedSDR) AS total_buffered, - (SELECT buffered_trees_count FROM BufferedTrees) AS buffered_trees_count, - (SELECT buffered_porep_count FROM BufferedPoRep) AS buffered_porep_count -`).Scan(&bufferedSDR, &bufferedTrees, &bufferedPoRep) - if err != nil { - return false, xerrors.Errorf("counting parked pieces: %w", err) - } - - if cfg.MaxQueueSDR != 0 && bufferedSDR > cfg.MaxQueueSDR { - log.Debugw("backpressure", "reason", "too many SDR tasks", "buffered", bufferedSDR, "max", cfg.MaxQueueSDR) - return true, nil - } - if cfg.MaxQueueTrees != 0 && bufferedTrees > cfg.MaxQueueTrees { - log.Debugw("backpressure", "reason", "too many tree tasks", "buffered", bufferedTrees, "max", cfg.MaxQueueTrees) - return true, nil - } - if cfg.MaxQueuePoRep != 0 && bufferedPoRep > cfg.MaxQueuePoRep { - log.Debugw("backpressure", "reason", "too many PoRep tasks", "buffered", bufferedPoRep, "max", cfg.MaxQueuePoRep) - return true, nil - } - - return false, nil -} diff --git a/curiosrc/message/sender.go b/curiosrc/message/sender.go deleted file mode 100644 index 614bc0be23a..00000000000 --- a/curiosrc/message/sender.go +++ /dev/null @@ -1,396 +0,0 @@ -package message - -import ( - "bytes" - "context" - "time" - - "github.com/google/uuid" - 
"github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "go.uber.org/multierr" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" -) - -var log = logging.Logger("curio/message") - -var SendLockedWait = 100 * time.Millisecond - -type SenderAPI interface { - StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) - WalletBalance(ctx context.Context, addr address.Address) (big.Int, error) - MpoolGetNonce(context.Context, address.Address) (uint64, error) - MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) -} - -type SignerAPI interface { - WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) -} - -// Sender abstracts away highly-available message sending with coordination through -// HarmonyDB. It make sure that nonces are assigned transactionally, and that -// messages are correctly broadcasted to the network. It ensures that messages -// are sent serially, and that failures to send don't cause nonce gaps. 
-type Sender struct { - api SenderAPI - - sendTask *SendTask - - db *harmonydb.DB -} - -type SendTask struct { - sendTF promise.Promise[harmonytask.AddTaskFunc] - - api SenderAPI - signer SignerAPI - - db *harmonydb.DB -} - -func (s *SendTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.TODO() - - // get message from db - - var dbMsg struct { - FromKey string `db:"from_key"` - ToAddr string `db:"to_addr"` - - UnsignedData []byte `db:"unsigned_data"` - UnsignedCid string `db:"unsigned_cid"` - - // may not be null if we have somehow already signed but failed to send this message - Nonce *uint64 `db:"nonce"` - SignedData []byte `db:"signed_data"` - } - - err = s.db.QueryRow(ctx, ` - SELECT from_key, nonce, to_addr, unsigned_data, unsigned_cid - FROM message_sends - WHERE send_task_id = $1`, taskID).Scan( - &dbMsg.FromKey, &dbMsg.Nonce, &dbMsg.ToAddr, &dbMsg.UnsignedData, &dbMsg.UnsignedCid) - if err != nil { - return false, xerrors.Errorf("getting message from db: %w", err) - } - - // deserialize the message - var msg types.Message - err = msg.UnmarshalCBOR(bytes.NewReader(dbMsg.UnsignedData)) - if err != nil { - return false, xerrors.Errorf("unmarshaling unsigned db message: %w", err) - } - - // get db send lock - for { - // check if we still own the task - if !stillOwned() { - return false, xerrors.Errorf("lost ownership of task") - } - - // try to acquire lock - cn, err := s.db.Exec(ctx, ` - INSERT INTO message_send_locks (from_key, task_id, claimed_at) - VALUES ($1, $2, CURRENT_TIMESTAMP) ON CONFLICT (from_key) DO UPDATE - SET task_id = EXCLUDED.task_id, claimed_at = CURRENT_TIMESTAMP - WHERE message_send_locks.task_id = $2;`, dbMsg.FromKey, taskID) - if err != nil { - return false, xerrors.Errorf("acquiring send lock: %w", err) - } - - if cn == 1 { - // we got the lock - break - } - - // we didn't get the lock, wait a bit and try again - log.Infow("waiting for send lock", "task_id", taskID, "from", 
dbMsg.FromKey) - time.Sleep(SendLockedWait) - } - - // defer release db send lock - defer func() { - _, err2 := s.db.Exec(ctx, ` - DELETE from message_send_locks WHERE from_key = $1 AND task_id = $2`, dbMsg.FromKey, taskID) - if err2 != nil { - log.Errorw("releasing send lock", "task_id", taskID, "from", dbMsg.FromKey, "error", err2) - - // make sure harmony retries this task so that we eventually release this lock - done = false - err = multierr.Append(err, xerrors.Errorf("releasing send lock: %w", err2)) - } - }() - - // assign nonce IF NOT ASSIGNED (max(api.MpoolGetNonce, db nonce+1)) - var sigMsg *types.SignedMessage - - if dbMsg.Nonce == nil { - msgNonce, err := s.api.MpoolGetNonce(ctx, msg.From) - if err != nil { - return false, xerrors.Errorf("getting nonce from mpool: %w", err) - } - - // get nonce from db - var dbNonce *uint64 - r := s.db.QueryRow(ctx, ` - SELECT MAX(nonce) FROM message_sends WHERE from_key = $1 AND send_success = true`, msg.From.String()) - if err := r.Scan(&dbNonce); err != nil { - return false, xerrors.Errorf("getting nonce from db: %w", err) - } - - if dbNonce != nil && *dbNonce+1 > msgNonce { - msgNonce = *dbNonce + 1 - } - - msg.Nonce = msgNonce - - // sign message - sigMsg, err = s.signer.WalletSignMessage(ctx, msg.From, &msg) - if err != nil { - return false, xerrors.Errorf("signing message: %w", err) - } - - data, err := sigMsg.Serialize() - if err != nil { - return false, xerrors.Errorf("serializing message: %w", err) - } - - jsonBytes, err := sigMsg.MarshalJSON() - if err != nil { - return false, xerrors.Errorf("marshaling message: %w", err) - } - - // write to db - - n, err := s.db.Exec(ctx, ` - UPDATE message_sends SET nonce = $1, signed_data = $2, signed_json = $3, signed_cid = $4 - WHERE send_task_id = $5`, - msg.Nonce, data, string(jsonBytes), sigMsg.Cid().String(), taskID) - if err != nil { - return false, xerrors.Errorf("updating db record: %w", err) - } - if n != 1 { - log.Errorw("updating db record: expected 1 row to be 
affected, got %d", n) - return false, xerrors.Errorf("updating db record: expected 1 row to be affected, got %d", n) - } - } else { - // Note: this handles an unlikely edge-case: - // We have previously signed the message but either failed to send it or failed to update the db - // note that when that happens the likely cause is the curio process losing its db connection - // or getting killed before it can update the db. In that case the message lock will still be held - // so it will be safe to rebroadcast the signed message - - // deserialize the signed message - sigMsg = new(types.SignedMessage) - err = sigMsg.UnmarshalCBOR(bytes.NewReader(dbMsg.SignedData)) - if err != nil { - return false, xerrors.Errorf("unmarshaling signed db message: %w", err) - } - } - - // send! - _, err = s.api.MpoolPush(ctx, sigMsg) - - // persist send result - var sendSuccess = err == nil - var sendError string - if err != nil { - sendError = err.Error() - } - - _, err = s.db.Exec(ctx, ` - UPDATE message_sends SET send_success = $1, send_error = $2, send_time = CURRENT_TIMESTAMP - WHERE send_task_id = $3`, sendSuccess, sendError, taskID) - if err != nil { - return false, xerrors.Errorf("updating db record: %w", err) - } - - return true, nil -} - -func (s *SendTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - if s.signer == nil { - // can't sign messages here - return nil, nil - } - - return &ids[0], nil -} - -func (s *SendTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 1024, - Name: "SendMessage", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 1 << 20, - }, - MaxFailures: 1000, - Follows: nil, - } -} - -func (s *SendTask) Adder(taskFunc harmonytask.AddTaskFunc) { - s.sendTF.Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &SendTask{} - -// NewSender creates a new Sender. 
-func NewSender(api SenderAPI, signer SignerAPI, db *harmonydb.DB) (*Sender, *SendTask) { - st := &SendTask{ - api: api, - signer: signer, - db: db, - } - - return &Sender{ - api: api, - db: db, - - sendTask: st, - }, st -} - -// Send atomically assigns a nonce, signs, and pushes a message -// to mempool. -// maxFee is only used when GasFeeCap/GasPremium fields aren't specified -// -// When maxFee is set to 0, Send will guess appropriate fee -// based on current chain conditions -// -// Send behaves much like fullnodeApi.MpoolPushMessage, but it coordinates -// through HarmonyDB, making it safe to broadcast messages from multiple independent -// API nodes -// -// Send is also currently more strict about required parameters than MpoolPushMessage -func (s *Sender) Send(ctx context.Context, msg *types.Message, mss *api.MessageSendSpec, reason string) (cid.Cid, error) { - if mss == nil { - return cid.Undef, xerrors.Errorf("MessageSendSpec cannot be nil") - } - if (mss.MsgUuid != uuid.UUID{}) { - return cid.Undef, xerrors.Errorf("MessageSendSpec.MsgUuid must be zero") - } - - fromA, err := s.api.StateAccountKey(ctx, msg.From, types.EmptyTSK) - if err != nil { - return cid.Undef, xerrors.Errorf("getting key address: %w", err) - } - - msg.From = fromA - - if msg.Nonce != 0 { - return cid.Undef, xerrors.Errorf("Send expects message nonce to be 0, was %d", msg.Nonce) - } - - msg, err = s.api.GasEstimateMessageGas(ctx, msg, mss, types.EmptyTSK) - if err != nil { - return cid.Undef, xerrors.Errorf("GasEstimateMessageGas error: %w", err) - } - - b, err := s.api.WalletBalance(ctx, msg.From) - if err != nil { - return cid.Undef, xerrors.Errorf("mpool push: getting origin balance: %w", err) - } - - requiredFunds := big.Add(msg.Value, msg.RequiredFunds()) - if b.LessThan(requiredFunds) { - return cid.Undef, xerrors.Errorf("mpool push: not enough funds: %s < %s", b, requiredFunds) - } - - // push the task - taskAdder := s.sendTask.sendTF.Val(ctx) - - unsBytes := new(bytes.Buffer) - 
err = msg.MarshalCBOR(unsBytes) - if err != nil { - return cid.Undef, xerrors.Errorf("marshaling message: %w", err) - } - - var sendTaskID *harmonytask.TaskID - taskAdder(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - _, err := tx.Exec(`insert into message_sends (from_key, to_addr, send_reason, unsigned_data, unsigned_cid, send_task_id) values ($1, $2, $3, $4, $5, $6)`, - msg.From.String(), msg.To.String(), reason, unsBytes.Bytes(), msg.Cid().String(), id) - if err != nil { - return false, xerrors.Errorf("inserting message into db: %w", err) - } - - sendTaskID = &id - - return true, nil - }) - - if sendTaskID == nil { - return cid.Undef, xerrors.Errorf("failed to add task") - } - - // wait for exec - var ( - pollInterval = 50 * time.Millisecond - pollIntervalMul = 2 - maxPollInterval = 5 * time.Second - pollLoops = 0 - - sigCid cid.Cid - sendErr error - ) - - for { - var err error - var sigCidStr, sendError *string - var sendSuccess *bool - - err = s.db.QueryRow(ctx, `select signed_cid, send_success, send_error from message_sends where send_task_id = $1`, &sendTaskID).Scan(&sigCidStr, &sendSuccess, &sendError) - if err != nil { - return cid.Undef, xerrors.Errorf("getting cid for task: %w", err) - } - - if sendSuccess == nil { - time.Sleep(pollInterval) - pollLoops++ - pollInterval *= time.Duration(pollIntervalMul) - if pollInterval > maxPollInterval { - pollInterval = maxPollInterval - } - - continue - } - - if sigCidStr == nil || sendError == nil { - // should never happen because sendSuccess is already not null here - return cid.Undef, xerrors.Errorf("got null values for sigCidStr or sendError, this should never happen") - } - - if !*sendSuccess { - sendErr = xerrors.Errorf("send error: %s", *sendError) - } else { - sigCid, err = cid.Parse(*sigCidStr) - if err != nil { - return cid.Undef, xerrors.Errorf("parsing signed cid: %w", err) - } - } - - break - } - - log.Infow("sent message", "cid", sigCid, "task_id", 
sendTaskID, "send_error", sendErr, "poll_loops", pollLoops) - - return sigCid, sendErr -} diff --git a/curiosrc/message/watch.go b/curiosrc/message/watch.go deleted file mode 100644 index 2253df28434..00000000000 --- a/curiosrc/message/watch.go +++ /dev/null @@ -1,214 +0,0 @@ -package message - -import ( - "context" - "encoding/json" - "sync/atomic" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" -) - -const MinConfidence = 6 - -type MessageWaiterApi interface { - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) - ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) - ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) -} - -type MessageWatcher struct { - db *harmonydb.DB - ht *harmonytask.TaskEngine - api MessageWaiterApi - - stopping, stopped chan struct{} - - updateCh chan struct{} - bestTs atomic.Pointer[types.TipSetKey] -} - -func NewMessageWatcher(db *harmonydb.DB, ht *harmonytask.TaskEngine, pcs *chainsched.CurioChainSched, api MessageWaiterApi) (*MessageWatcher, error) { - mw := &MessageWatcher{ - db: db, - ht: ht, - api: api, - stopping: make(chan struct{}), - stopped: make(chan struct{}), - updateCh: make(chan struct{}), - } - go mw.run() - if err := pcs.AddHandler(mw.processHeadChange); err != nil { - return nil, err - } - return mw, nil -} - -func (mw *MessageWatcher) run() { - defer 
close(mw.stopped) - - for { - select { - case <-mw.stopping: - // todo cleanup assignments - return - case <-mw.updateCh: - mw.update() - } - } -} - -func (mw *MessageWatcher) update() { - ctx := context.Background() - - tsk := *mw.bestTs.Load() - - ts, err := mw.api.ChainGetTipSet(ctx, tsk) - if err != nil { - log.Errorf("failed to get tipset: %+v", err) - return - } - - lbts, err := mw.api.ChainGetTipSetByHeight(ctx, ts.Height()-MinConfidence, tsk) - if err != nil { - log.Errorf("failed to get tipset: %+v", err) - return - } - lbtsk := lbts.Key() - - machineID := mw.ht.ResourcesAvailable().MachineID - - // first if we see pending messages with null owner, assign them to ourselves - { - n, err := mw.db.Exec(ctx, `UPDATE message_waits SET waiter_machine_id = $1 WHERE waiter_machine_id IS NULL AND executed_tsk_cid IS NULL`, machineID) - if err != nil { - log.Errorf("failed to assign pending messages: %+v", err) - return - } - if n > 0 { - log.Debugw("assigned pending messages to ourselves", "assigned", n) - } - } - - // get messages assigned to us - var msgs []struct { - Cid string `db:"signed_message_cid"` - From string `db:"from_key"` - Nonce uint64 `db:"nonce"` - - FromAddr address.Address `db:"-"` - } - - // really large limit in case of things getting stuck and backlogging severely - err = mw.db.Select(ctx, &msgs, `SELECT signed_message_cid, from_key, nonce FROM message_waits - JOIN message_sends ON signed_message_cid = signed_cid - WHERE waiter_machine_id = $1 LIMIT 10000`, machineID) - if err != nil { - log.Errorf("failed to get assigned messages: %+v", err) - return - } - - // get address/nonce set to check - toCheck := make(map[address.Address]uint64) - - for i := range msgs { - msgs[i].FromAddr, err = address.NewFromString(msgs[i].From) - if err != nil { - log.Errorf("failed to parse from address: %+v", err) - return - } - toCheck[msgs[i].FromAddr] = 0 - } - - // get the nonce for each address - for addr := range toCheck { - act, err := 
mw.api.StateGetActor(ctx, addr, lbtsk) - if err != nil { - log.Errorf("failed to get actor: %+v", err) - return - } - - toCheck[addr] = act.Nonce - } - - // check if any of the messages we have assigned to us are now on chain, and have been for MinConfidence epochs - for _, msg := range msgs { - if msg.Nonce > toCheck[msg.FromAddr] { - continue // definitely not on chain yet - } - - look, err := mw.api.StateSearchMsg(ctx, lbtsk, cid.MustParse(msg.Cid), api.LookbackNoLimit, false) - if err != nil { - log.Errorf("failed to search for message: %+v", err) - return - } - - if look == nil { - continue // not on chain yet (or not executed yet) - } - - tskCid, err := look.TipSet.Cid() - if err != nil { - log.Errorf("failed to get tipset cid: %+v", err) - return - } - - emsg, err := mw.api.ChainGetMessage(ctx, look.Message) - if err != nil { - log.Errorf("failed to get message: %+v", err) - return - } - - execMsg, err := json.Marshal(emsg) - if err != nil { - log.Errorf("failed to marshal message: %+v", err) - return - } - - // record in db - _, err = mw.db.Exec(ctx, `UPDATE message_waits SET - waiter_machine_id = NULL, - executed_tsk_cid = $1, executed_tsk_epoch = $2, - executed_msg_cid = $3, executed_msg_data = $4, - executed_rcpt_exitcode = $5, executed_rcpt_return = $6, executed_rcpt_gas_used = $7 - WHERE signed_message_cid = $8`, tskCid, look.Height, - look.Message, execMsg, - look.Receipt.ExitCode, look.Receipt.Return, look.Receipt.GasUsed, - msg.Cid) - if err != nil { - log.Errorf("failed to update message wait: %+v", err) - return - } - } -} - -func (mw *MessageWatcher) Stop(ctx context.Context) error { - close(mw.stopping) - select { - case <-mw.stopped: - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} - -func (mw *MessageWatcher) processHeadChange(ctx context.Context, revert *types.TipSet, apply *types.TipSet) error { - best := apply.Key() - mw.bestTs.Store(&best) - select { - case mw.updateCh <- struct{}{}: - default: - } - return nil -} diff --git 
a/curiosrc/multictladdr/multiaddresses.go b/curiosrc/multictladdr/multiaddresses.go deleted file mode 100644 index af751ff17e7..00000000000 --- a/curiosrc/multictladdr/multiaddresses.go +++ /dev/null @@ -1,81 +0,0 @@ -package multictladdr - -import ( - "context" - - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -var log = logging.Logger("curio/multictladdr") - -type MultiAddressSelector struct { - MinerMap map[address.Address]api.AddressConfig -} - -func (as *MultiAddressSelector) AddressFor(ctx context.Context, a ctladdr.NodeApi, minerID address.Address, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - if as == nil { - // should only happen in some tests - log.Warnw("smart address selection disabled, using worker address") - return mi.Worker, big.Zero(), nil - } - - tmp := as.MinerMap[minerID] - - var addrs []address.Address - switch use { - case api.PreCommitAddr: - addrs = append(addrs, tmp.PreCommitControl...) - case api.CommitAddr: - addrs = append(addrs, tmp.CommitControl...) - case api.TerminateSectorsAddr: - addrs = append(addrs, tmp.TerminateControl...) - case api.DealPublishAddr: - addrs = append(addrs, tmp.DealPublishControl...) - default: - defaultCtl := map[address.Address]struct{}{} - for _, a := range mi.ControlAddresses { - defaultCtl[a] = struct{}{} - } - delete(defaultCtl, mi.Owner) - delete(defaultCtl, mi.Worker) - - configCtl := append([]address.Address{}, tmp.PreCommitControl...) - configCtl = append(configCtl, tmp.CommitControl...) - configCtl = append(configCtl, tmp.TerminateControl...) - configCtl = append(configCtl, tmp.DealPublishControl...) 
- - for _, addr := range configCtl { - if addr.Protocol() != address.ID { - var err error - addr, err = a.StateLookupID(ctx, addr, types.EmptyTSK) - if err != nil { - log.Warnw("looking up control address", "address", addr, "error", err) - continue - } - } - - delete(defaultCtl, addr) - } - - for a := range defaultCtl { - addrs = append(addrs, a) - } - } - - if len(addrs) == 0 || !tmp.DisableWorkerFallback { - addrs = append(addrs, mi.Worker) - } - if !tmp.DisableOwnerFallback { - addrs = append(addrs, mi.Owner) - } - - return ctladdr.PickAddress(ctx, a, mi, goodFunds, minFunds, addrs) -} diff --git a/curiosrc/piece/task_cleanup_piece.go b/curiosrc/piece/task_cleanup_piece.go deleted file mode 100644 index ed22ccb46d3..00000000000 --- a/curiosrc/piece/task_cleanup_piece.go +++ /dev/null @@ -1,130 +0,0 @@ -package piece - -import ( - "context" - "time" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type CleanupPieceTask struct { - max int - db *harmonydb.DB - sc *ffi.SealCalls - - TF promise.Promise[harmonytask.AddTaskFunc] -} - -func NewCleanupPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) *CleanupPieceTask { - pt := &CleanupPieceTask{ - db: db, - sc: sc, - - max: max, - } - go pt.pollCleanupTasks(context.Background()) - return pt -} - -func (c *CleanupPieceTask) pollCleanupTasks(ctx context.Context) { - for { - // select pieces with no refs and null cleanup_task_id - var pieceIDs []struct { - ID storiface.PieceNumber `db:"id"` - } - - err := c.db.Select(ctx, &pieceIDs, `SELECT id FROM parked_pieces WHERE cleanup_task_id IS NULL AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = parked_pieces.id) = 0`) - if err != nil 
{ - log.Errorf("failed to get parked pieces: %s", err) - time.Sleep(PieceParkPollInterval) - continue - } - - if len(pieceIDs) == 0 { - time.Sleep(PieceParkPollInterval) - continue - } - - for _, pieceID := range pieceIDs { - pieceID := pieceID - - // create a task for each piece - c.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { - // update - n, err := tx.Exec(`UPDATE parked_pieces SET cleanup_task_id = $1 WHERE id = $2 AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = parked_pieces.id) = 0`, id, pieceID.ID) - if err != nil { - return false, xerrors.Errorf("updating parked piece: %w", err) - } - - // commit only if we updated the piece - return n > 0, nil - }) - } - } -} - -func (c *CleanupPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - // select by cleanup_task_id - var pieceID int64 - - err = c.db.QueryRow(ctx, "SELECT id FROM parked_pieces WHERE cleanup_task_id = $1", taskID).Scan(&pieceID) - if err != nil { - return false, xerrors.Errorf("query parked_piece: %w", err) - } - - // delete from parked_pieces where id = $1 where ref count = 0 - // note: we delete from the db first because that guarantees that the piece is no longer in use - // if storage delete fails, it will be retried later is other cleanup tasks - n, err := c.db.Exec(ctx, "DELETE FROM parked_pieces WHERE id = $1 AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = $1) = 0", pieceID) - if err != nil { - return false, xerrors.Errorf("delete parked_piece: %w", err) - } - - if n == 0 { - return true, nil - } - - // remove from storage - err = c.sc.RemovePiece(ctx, storiface.PieceNumber(pieceID)) - if err != nil { - log.Errorw("remove piece", "piece_id", pieceID, "error", err) - } - - return true, nil -} - -func (c *CleanupPieceTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - // the remove call runs on 
paths.Remote storage, so it doesn't really matter where it runs - - id := ids[0] - return &id, nil -} - -func (c *CleanupPieceTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: c.max, - Name: "DropPiece", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 64 << 20, - Storage: nil, - }, - MaxFailures: 10, - } -} - -func (c *CleanupPieceTask) Adder(taskFunc harmonytask.AddTaskFunc) { - c.TF.Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &CleanupPieceTask{} diff --git a/curiosrc/piece/task_park_piece.go b/curiosrc/piece/task_park_piece.go deleted file mode 100644 index 68a94a295a0..00000000000 --- a/curiosrc/piece/task_park_piece.go +++ /dev/null @@ -1,221 +0,0 @@ -package piece - -import ( - "context" - "encoding/json" - "strconv" - "time" - - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("lppiece") -var PieceParkPollInterval = time.Second * 15 - -// ParkPieceTask gets a piece from some origin, and parks it in storage -// Pieces are always f00, piece ID is mapped to pieceCID in the DB -type ParkPieceTask struct { - db *harmonydb.DB - sc *ffi.SealCalls - - TF promise.Promise[harmonytask.AddTaskFunc] - - max int -} - -func NewParkPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) *ParkPieceTask { - pt := &ParkPieceTask{ - db: db, - sc: sc, - - max: max, - } - - go pt.pollPieceTasks(context.Background()) - return pt -} - -func (p *ParkPieceTask) pollPieceTasks(ctx context.Context) { - for { - // select parked pieces with no task_id - var pieceIDs []struct { - ID 
storiface.PieceNumber `db:"id"` - } - - err := p.db.Select(ctx, &pieceIDs, `SELECT id FROM parked_pieces WHERE complete = FALSE AND task_id IS NULL`) - if err != nil { - log.Errorf("failed to get parked pieces: %s", err) - time.Sleep(PieceParkPollInterval) - continue - } - - if len(pieceIDs) == 0 { - time.Sleep(PieceParkPollInterval) - continue - } - - for _, pieceID := range pieceIDs { - pieceID := pieceID - - // create a task for each piece - p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { - // update - n, err := tx.Exec(`UPDATE parked_pieces SET task_id = $1 WHERE id = $2 AND complete = FALSE AND task_id IS NULL`, id, pieceID.ID) - if err != nil { - return false, xerrors.Errorf("updating parked piece: %w", err) - } - - // commit only if we updated the piece - return n > 0, nil - }) - } - } -} - -func (p *ParkPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - // Define a struct to hold piece data. - var piecesData []struct { - PieceID int64 `db:"id"` - PieceCreatedAt time.Time `db:"created_at"` - PieceCID string `db:"piece_cid"` - Complete bool `db:"complete"` - PiecePaddedSize int64 `db:"piece_padded_size"` - PieceRawSize string `db:"piece_raw_size"` - } - - // Select the piece data using the task ID. - err = p.db.Select(ctx, &piecesData, ` - SELECT id, created_at, piece_cid, complete, piece_padded_size, piece_raw_size - FROM parked_pieces - WHERE task_id = $1 - `, taskID) - if err != nil { - return false, xerrors.Errorf("fetching piece data: %w", err) - } - - if len(piecesData) == 0 { - return false, xerrors.Errorf("no piece data found for task_id: %d", taskID) - } - - pieceData := piecesData[0] - - if pieceData.Complete { - log.Warnw("park piece task already complete", "task_id", taskID, "piece_cid", pieceData.PieceCID) - return true, nil - } - - // Define a struct for reference data. 
- var refData []struct { - DataURL string `db:"data_url"` - DataHeaders json.RawMessage `db:"data_headers"` - } - - // Now, select the first reference data that has a URL. - err = p.db.Select(ctx, &refData, ` - SELECT data_url, data_headers - FROM parked_piece_refs - WHERE piece_id = $1 AND data_url IS NOT NULL - LIMIT 1 - `, pieceData.PieceID) - if err != nil { - return false, xerrors.Errorf("fetching reference data: %w", err) - } - - if len(refData) == 0 { - return false, xerrors.Errorf("no refs found for piece_id: %d", pieceData.PieceID) - } - - // Convert piece_raw_size from string to int64. - pieceRawSize, err := strconv.ParseInt(pieceData.PieceRawSize, 10, 64) - if err != nil { - return false, xerrors.Errorf("parsing piece raw size: %w", err) - } - - if refData[0].DataURL != "" { - upr := &seal.UrlPieceReader{ - Url: refData[0].DataURL, - RawSize: pieceRawSize, - } - defer func() { - _ = upr.Close() - }() - - pnum := storiface.PieceNumber(pieceData.PieceID) - - if err := p.sc.WritePiece(ctx, &taskID, pnum, pieceRawSize, upr); err != nil { - return false, xerrors.Errorf("write piece: %w", err) - } - - // Update the piece as complete after a successful write. - _, err = p.db.Exec(ctx, `UPDATE parked_pieces SET complete = TRUE WHERE id = $1`, pieceData.PieceID) - if err != nil { - return false, xerrors.Errorf("marking piece as complete: %w", err) - } - - return true, nil - } - - // If no URL is found, this indicates an issue since at least one URL is expected. 
- return false, xerrors.Errorf("no data URL found for piece_id: %d", pieceData.PieceID) -} - -func (p *ParkPieceTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (p *ParkPieceTask) TypeDetails() harmonytask.TaskTypeDetails { - const maxSizePiece = 64 << 30 - - return harmonytask.TaskTypeDetails{ - Max: p.max, - Name: "ParkPiece", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 64 << 20, - Storage: p.sc.Storage(p.taskToRef, storiface.FTPiece, storiface.FTNone, maxSizePiece, storiface.PathSealing), - }, - MaxFailures: 10, - } -} - -func (p *ParkPieceTask) taskToRef(id harmonytask.TaskID) (ffi.SectorRef, error) { - var pieceIDs []struct { - ID storiface.PieceNumber `db:"id"` - } - - err := p.db.Select(context.Background(), &pieceIDs, `SELECT id FROM parked_pieces WHERE task_id = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting piece id: %w", err) - } - - if len(pieceIDs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 piece id, got %d", len(pieceIDs)) - } - - pref := pieceIDs[0].ID.Ref() - - return ffi.SectorRef{ - SpID: int64(pref.ID.Miner), - SectorNumber: int64(pref.ID.Number), - RegSealProof: pref.ProofType, - }, nil -} - -func (p *ParkPieceTask) Adder(taskFunc harmonytask.AddTaskFunc) { - p.TF.Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &ParkPieceTask{} diff --git a/curiosrc/proof/treed_build.go b/curiosrc/proof/treed_build.go deleted file mode 100644 index 7145c9257ca..00000000000 --- a/curiosrc/proof/treed_build.go +++ /dev/null @@ -1,292 +0,0 @@ -package proof - -import ( - "io" - "math/bits" - "os" - "runtime" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - pool "github.com/libp2p/go-buffer-pool" - "github.com/minio/sha256-simd" - "golang.org/x/xerrors" - - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-state-types/abi" - - 
"github.com/filecoin-project/lotus/storage/sealer/fr32" -) - -const nodeSize = 32 -const threadChunkSize = 1 << 20 - -func hashChunk(data [][]byte) { - l1Nodes := len(data[0]) / nodeSize / 2 - - d := sha256.New() - - sumBuf := make([]byte, nodeSize) - - for i := 0; i < l1Nodes; i++ { - levels := bits.TrailingZeros(^uint(i)) + 1 - - inNode := i * 2 // at level 0 - outNode := i - - for l := 0; l < levels; l++ { - d.Reset() - inNodeData := data[l][inNode*nodeSize : (inNode+2)*nodeSize] - d.Write(inNodeData) - copy(data[l+1][outNode*nodeSize:(outNode+1)*nodeSize], d.Sum(sumBuf[:0])) - // set top bits to 00 - data[l+1][outNode*nodeSize+nodeSize-1] &= 0x3f - - inNode-- - inNode >>= 1 - outNode >>= 1 - } - } -} - -func BuildTreeD(data io.Reader, unpaddedData bool, outPath string, size abi.PaddedPieceSize) (_ cid.Cid, err error) { - out, err := os.Create(outPath) - if err != nil { - return cid.Undef, err - } - defer func() { - cerr := out.Close() - - if err != nil { - // remove the file, it's probably bad - rerr := os.Remove(outPath) - if rerr != nil { - err = multierror.Append(err, rerr) - } - } - - if cerr != nil { - err = multierror.Append(err, cerr) - } - }() - - outSize := treeSize(size) - - // allocate space for the tree - err = out.Truncate(int64(outSize)) - if err != nil { - return cid.Undef, err - } - - // setup buffers - maxThreads := int64(size) / threadChunkSize - if maxThreads > int64(runtime.NumCPU())*15/10 { - maxThreads = int64(runtime.NumCPU()) * 15 / 10 - } - if maxThreads < 1 { - maxThreads = 1 - } - - // allocate buffers - var bufLk sync.Mutex - workerBuffers := make([][][]byte, maxThreads) // [worker][level][levelSize] - - for i := range workerBuffers { - workerBuffer := make([][]byte, 1) - - bottomBufSize := int64(threadChunkSize) - if bottomBufSize > int64(size) { - bottomBufSize = int64(size) - } - workerBuffer[0] = pool.Get(int(bottomBufSize)) - - // append levels until we get to a 32 byte level - for len(workerBuffer[len(workerBuffer)-1]) > 32 { - 
newLevel := pool.Get(len(workerBuffer[len(workerBuffer)-1]) / 2) - workerBuffer = append(workerBuffer, newLevel) - } - workerBuffers[i] = workerBuffer - } - - // prepare apex buffer - var apexBuf [][]byte - { - apexBottomSize := uint64(size) / uint64(len(workerBuffers[0][0])) - if apexBottomSize == 0 { - apexBottomSize = 1 - } - - apexBuf = make([][]byte, 1) - apexBuf[0] = pool.Get(int(apexBottomSize * nodeSize)) - for len(apexBuf[len(apexBuf)-1]) > 32 { - newLevel := pool.Get(len(apexBuf[len(apexBuf)-1]) / 2) - apexBuf = append(apexBuf, newLevel) - } - } - - // defer free pool buffers - defer func() { - for _, workerBuffer := range workerBuffers { - for _, level := range workerBuffer { - pool.Put(level) - } - } - for _, level := range apexBuf { - pool.Put(level) - } - }() - - // start processing - var processed uint64 - var workWg sync.WaitGroup - var errLock sync.Mutex - var oerr error - - for processed < uint64(size) { - // get a buffer - bufLk.Lock() - if len(workerBuffers) == 0 { - bufLk.Unlock() - time.Sleep(50 * time.Microsecond) - continue - } - - // pop last - workBuffer := workerBuffers[len(workerBuffers)-1] - workerBuffers = workerBuffers[:len(workerBuffers)-1] - - bufLk.Unlock() - - // before reading check that we didn't get a write error - errLock.Lock() - if oerr != nil { - errLock.Unlock() - return cid.Undef, oerr - } - errLock.Unlock() - - // read data into the bottom level - // note: the bottom level will never be too big; data is power of two - // size, and if it's smaller than a single buffer, we only have one - // smaller buffer - - processedSize := uint64(len(workBuffer[0])) - if unpaddedData { - workBuffer[0] = workBuffer[0][:abi.PaddedPieceSize(len(workBuffer[0])).Unpadded()] - } - - _, err := io.ReadFull(data, workBuffer[0]) - if err != nil && err != io.EOF { - return cid.Undef, err - } - - // start processing - workWg.Add(1) - go func(startOffset uint64) { - defer workWg.Done() - - if unpaddedData { - paddedBuf := 
pool.Get(int(abi.UnpaddedPieceSize(len(workBuffer[0])).Padded())) - fr32.PadSingle(workBuffer[0], paddedBuf) - pool.Put(workBuffer[0]) - workBuffer[0] = paddedBuf - } - hashChunk(workBuffer) - - // persist apex - { - apexHash := workBuffer[len(workBuffer)-1] - hashPos := startOffset / uint64(len(workBuffer[0])) * nodeSize - - copy(apexBuf[0][hashPos:hashPos+nodeSize], apexHash) - } - - // write results - offsetInLayer := startOffset - for layer, layerData := range workBuffer { - - // layerOff is outSize:bits[most significant bit - layer] - layerOff := layerOffset(uint64(size), layer) - dataOff := offsetInLayer + layerOff - offsetInLayer /= 2 - - _, werr := out.WriteAt(layerData, int64(dataOff)) - if werr != nil { - errLock.Lock() - oerr = multierror.Append(oerr, werr) - errLock.Unlock() - return - } - } - - // return buffer - bufLk.Lock() - workerBuffers = append(workerBuffers, workBuffer) - bufLk.Unlock() - }(processed) - - processed += processedSize - } - - workWg.Wait() - - if oerr != nil { - return cid.Undef, oerr - } - - threadLayers := bits.Len(uint(len(workerBuffers[0][0])) / nodeSize) - - if len(apexBuf) > 0 { - // hash the apex - hashChunk(apexBuf) - - // write apex - for apexLayer, layerData := range apexBuf { - if apexLayer == 0 { - continue - } - layer := apexLayer + threadLayers - 1 - - layerOff := layerOffset(uint64(size), layer) - _, werr := out.WriteAt(layerData, int64(layerOff)) - if werr != nil { - return cid.Undef, xerrors.Errorf("write apex: %w", werr) - } - } - } - - var commp [32]byte - copy(commp[:], apexBuf[len(apexBuf)-1]) - - commCid, err := commcid.DataCommitmentV1ToCID(commp[:]) - if err != nil { - return cid.Undef, err - } - - return commCid, nil -} - -func treeSize(data abi.PaddedPieceSize) uint64 { - bytesToAlloc := uint64(data) - - // append bytes until we get to nodeSize - for todo := bytesToAlloc; todo > nodeSize; todo /= 2 { - bytesToAlloc += todo / 2 - } - - return bytesToAlloc -} - -func layerOffset(size uint64, layer int) 
uint64 { - allOnes := uint64(0xffff_ffff_ffff_ffff) - - // get 'layer' bits set to 1 - layerOnes := allOnes >> uint64(64-layer) - - // shift layerOnes to the left such that the highest bit is at the same position as the highest bit in size (which is power-of-two) - sizeBitPos := bits.Len64(size) - 1 - layerOnes <<= sizeBitPos - (layer - 1) - return layerOnes -} diff --git a/curiosrc/proof/treed_build_test.go b/curiosrc/proof/treed_build_test.go deleted file mode 100644 index f69e9832247..00000000000 --- a/curiosrc/proof/treed_build_test.go +++ /dev/null @@ -1,516 +0,0 @@ -package proof - -import ( - "bufio" - "bytes" - "crypto/rand" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "testing" - - pool "github.com/libp2p/go-buffer-pool" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" -) - -func TestTreeSize(t *testing.T) { - require.Equal(t, uint64(32), treeSize(abi.PaddedPieceSize(32))) - require.Equal(t, uint64(64+32), treeSize(abi.PaddedPieceSize(64))) - require.Equal(t, uint64(128+64+32), treeSize(abi.PaddedPieceSize(128))) - require.Equal(t, uint64(256+128+64+32), treeSize(abi.PaddedPieceSize(256))) -} - -func TestTreeLayerOffset(t *testing.T) { - require.Equal(t, uint64(0), layerOffset(128, 0)) - require.Equal(t, uint64(128), layerOffset(128, 1)) - require.Equal(t, uint64(128+64), layerOffset(128, 2)) - require.Equal(t, uint64(128+64+32), layerOffset(128, 3)) -} - -func TestHashChunk(t *testing.T) { - chunk := make([]byte, 64) - chunk[0] = 0x01 - - out := make([]byte, 32) - - data := [][]byte{chunk, out} - hashChunk(data) - - // 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d - // d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f - expect := []byte{ - 0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70, - 0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d, - 0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77, - 0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 
0x2f, - } - - require.Equal(t, expect, out) -} - -func TestHashChunk2L(t *testing.T) { - data0 := make([]byte, 128) - data0[0] = 0x01 - - l1 := make([]byte, 64) - l2 := make([]byte, 32) - - data := [][]byte{data0, l1, l2} - hashChunk(data) - - // 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d - // d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f - expectL1Left := []byte{ - 0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70, - 0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d, - 0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77, - 0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 0x2f, - } - - // f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b - // 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b - expectL1Rest := []byte{ - 0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30, - 0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b, - 0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8, - 0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x0b, - } - - require.Equal(t, expectL1Left, l1[:32]) - require.Equal(t, expectL1Rest, l1[32:]) - - // 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8 - // 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f - expectL2 := []byte{ - 0x0d, 0xd6, 0xda, 0xe4, 0x1c, 0x2f, 0x75, 0x55, - 0x01, 0x29, 0x59, 0x4f, 0xb6, 0x44, 0xe4, 0xa8, - 0x42, 0xcf, 0xaf, 0xb3, 0x16, 0xa2, 0xd5, 0x93, - 0x21, 0xe3, 0x88, 0xfe, 0x84, 0xa1, 0xec, 0x2f, - } - - require.Equal(t, expectL2, l2) -} - -func Test2K(t *testing.T) { - data := make([]byte, 2048) - data[0] = 0x01 - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 2048) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - dat, err := os.ReadFile(tempFile) - require.NoError(t, err) - - for i, b := range dat { - // 32 values per line - if i%32 == 0 { - fmt.Println() - - // line offset hexdump style - fmt.Printf("%04x: ", i) - } - fmt.Printf("%02x ", b) - } - fmt.Println() - - require.Equal(t, "baga6ea4seaqovgk4kr4eoifujh6jfmdqvw3m6zrvyjqzu6s6abkketui6jjoydi", 
commd.String()) - -} - -const expectD8M = `00000000: 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -* -00800000: 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f -00800020: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b -* -00c00000: 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f -00c00020: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33 -* -00e00000: 11 b1 c4 80 05 21 d5 e5 83 4a de b3 70 7c 74 15 9f f3 37 b0 96 16 3c 94 31 16 73 40 e7 b1 17 1d -00e00020: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f -* -00f00000: ec 69 25 55 9b cc 52 84 0a 22 38 5b 2b 6b 35 b4 50 14 50 04 28 f4 59 fe c1 23 01 0f e7 ef 18 1c -00f00020: 57 a2 38 1a 28 65 2b f4 7f 6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26 -* -00f80000: 3d d2 eb 19 3e e2 f0 47 34 87 bf 4b 83 aa 3a bd a9 c8 4e fa e5 52 6d 8a fd 61 2d 5d 9e 3d 79 34 -00f80020: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f -* -00fc0000: ea 99 5c 54 78 47 20 b4 49 fc 92 b0 70 ad b6 cf 66 35 c2 61 9a 7a 5e 00 54 a2 4e 88 f2 52 ec 0d -00fc0020: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33 -* -00fe0000: b9 97 02 8b 06 d7 2e 96 07 86 79 58 e1 5f 8d 07 b7 ae 37 ab 29 ab 3f a9 de fe c9 8e aa 37 6e 28 -00fe0020: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24 -* -00ff0000: a0 c4 4f 7b a4 4c d2 3c 2e bf 75 98 7b e8 98 a5 63 80 73 b2 f9 11 cf ee ce 14 5a 77 58 0c 6c 12 -00ff0020: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f -* 
-00ff8000: 89 2d 2b 00 a5 c1 54 10 94 ca 65 de 21 3b bd 45 90 14 15 ed d1 10 17 cd 29 f3 ed 75 73 02 a0 3f -00ff8020: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08 -* -00ffc000: 22 48 54 8b ba a5 8f e2 db 0b 07 18 c1 d7 20 1f ed 64 c7 8d 7d 22 88 36 b2 a1 b2 f9 42 0b ef 3c -00ffc020: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11 -* -00ffe000: 1c 6a 48 08 3e 17 49 90 ef c0 56 ec b1 44 75 1d e2 76 d8 a5 1c 3d 93 d7 4c 81 92 48 ab 78 cc 30 -00ffe020: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02 -* -00fff000: 0a b4 26 38 1b 72 cd 3b b3 e3 c7 82 18 fe 1f 18 3b 3a 19 db c4 d9 26 94 30 03 cd 01 b6 d1 8d 0b -00fff020: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f -* -00fff800: 16 0d 87 17 1b e7 ae e4 20 a3 54 24 cf df 4f fe a2 fd 7b 94 58 89 58 f3 45 11 57 fc 39 8f 34 26 -00fff820: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07 -* -00fffc00: 1f 40 60 11 da 08 f8 09 80 63 97 dc 1c 57 b9 87 83 37 5a 59 5d d6 81 42 6c 1e cd d4 3c ab e3 3c -00fffc20: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19 -* -00fffe00: 51 4e dd 2f 6f 8f 6d fd 54 b0 d1 20 7b b7 06 df 85 c5 a3 19 0e af 38 72 37 20 c5 07 56 67 7f 14 -00fffe20: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b -* -00ffff00: 5a 1d 84 74 85 a3 4b 28 08 93 a9 cf b2 8b 54 44 67 12 8b eb c0 22 bd de c1 04 be ca b4 f4 81 31 -00ffff20: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26 -* -00ffff80: c5 fb f3 f9 4c c2 2b 3c 51 ad c1 ea af e9 4b a0 9f b2 73 f3 73 d2 10 1f 12 0b 11 c6 85 21 66 2f -00ffffa0: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11 -00ffffc0: 23 40 4a 88 80 f9 cb c7 20 39 cb 86 14 
35 9c 28 34 84 55 70 fe 95 19 0b bd 4d 93 41 42 e8 25 2c -` - -func Test8MiB(t *testing.T) { - data := make([]byte, 8<<20) - data[0] = 0x01 - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 8<<20) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - dat, err := os.ReadFile(tempFile) - require.NoError(t, err) - - actualD := hexPrint32LDedup(bytes.NewReader(dat)) - fmt.Println(actualD) - - require.EqualValues(t, expectD8M, actualD) - require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String()) -} - -func Test8MiBUnpad(t *testing.T) { - data := make([]byte, abi.PaddedPieceSize(8<<20).Unpadded()) - data[0] = 0x01 - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(bytes.NewReader(data), true, tempFile, 8<<20) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - dat, err := os.ReadFile(tempFile) - require.NoError(t, err) - - actualD := hexPrint32LDedup(bytes.NewReader(dat)) - fmt.Println(actualD) - - require.EqualValues(t, expectD8M, actualD) - require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String()) -} - -/*func Test32Golden(t *testing.T) { - datFile, err := os.Open("../../seal/cac/sc-02-data-tree-d.dat") - require.NoError(t, err) - - bufReader := bufio.NewReaderSize(datFile, 1<<20) - - actualD := hexPrint32LDedup(bufReader) - fmt.Println(actualD) -} -*/ - -var expect32Null = `00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -* -800000000: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b -* -c00000000: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33 -* -e00000000: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f -* -f00000000: 57 a2 38 1a 28 65 2b f4 7f 
6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26 -* -f80000000: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f -* -fc0000000: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33 -* -fe0000000: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24 -* -ff0000000: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f -* -ff8000000: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08 -* -ffc000000: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11 -* -ffe000000: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02 -* -fff000000: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f -* -fff800000: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07 -* -fffc00000: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19 -* -fffe00000: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b -* -ffff00000: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26 -* -ffff80000: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11 -* -ffffc0000: 65 f2 9e 5d 98 d2 46 c3 8b 38 8c fc 06 db 1f 6b 02 13 03 c5 a2 89 00 0b dc e8 32 a9 c3 ec 42 1c -* -ffffe0000: a2 24 75 08 28 58 50 96 5b 7e 33 4b 31 27 b0 c0 42 b1 d0 46 dc 54 40 21 37 62 7c d8 79 9c e1 3a -* -fffff0000: da fd ab 6d a9 36 44 53 c2 6d 33 72 6b 9f ef e3 43 be 8f 81 64 9e c0 09 aa d3 fa ff 50 61 75 08 -* -fffff8000: d9 41 d5 e0 d6 31 4a 99 5c 33 ff bd 4f be 69 11 8d 73 d4 e5 fd 2c d3 1f 0f 7c 86 eb dd 14 e7 06 -* -fffffc000: 51 4c 43 5c 3d 04 d3 49 a5 
36 5f bd 59 ff c7 13 62 91 11 78 59 91 c1 a3 c5 3a f2 20 79 74 1a 2f -* -fffffe000: ad 06 85 39 69 d3 7d 34 ff 08 e0 9f 56 93 0a 4a d1 9a 89 de f6 0c bf ee 7e 1d 33 81 c1 e7 1c 37 -* -ffffff000: 39 56 0e 7b 13 a9 3b 07 a2 43 fd 27 20 ff a7 cb 3e 1d 2e 50 5a b3 62 9e 79 f4 63 13 51 2c da 06 -* -ffffff800: cc c3 c0 12 f5 b0 5e 81 1a 2b bf dd 0f 68 33 b8 42 75 b4 7b f2 29 c0 05 2a 82 48 4f 3c 1a 5b 3d -* -ffffffc00: 7d f2 9b 69 77 31 99 e8 f2 b4 0b 77 91 9d 04 85 09 ee d7 68 e2 c7 29 7b 1f 14 37 03 4f c3 c6 2c -* -ffffffe00: 66 ce 05 a3 66 75 52 cf 45 c0 2b cc 4e 83 92 91 9b de ac 35 de 2f f5 62 71 84 8e 9f 7b 67 51 07 -* -fffffff00: d8 61 02 18 42 5a b5 e9 5b 1c a6 23 9d 29 a2 e4 20 d7 06 a9 6f 37 3e 2f 9c 9a 91 d7 59 d1 9b 01 -* -fffffff80: 6d 36 4b 1e f8 46 44 1a 5a 4a 68 86 23 14 ac c0 a4 6f 01 67 17 e5 34 43 e8 39 ee df 83 c2 85 3c -* -fffffffc0: 07 7e 5f de 35 c5 0a 93 03 a5 50 09 e3 49 8a 4e be df f3 9c 42 b7 10 b7 30 d8 ec 7a c7 af a6 3e -` - -func Test32G(t *testing.T) { - if os.Getenv("LOTUS_TEST_LARGE_SECTORS") != "1" { - t.Skip("skipping large sector test without env LOTUS_TEST_LARGE_SECTORS=1") - } - - data := nullreader.NewNullReader(abi.PaddedPieceSize(32 << 30).Unpadded()) - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(data, true, tempFile, 32<<30) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - datFile, err := os.Open(tempFile) - require.NoError(t, err) - defer func() { - require.NoError(t, datFile.Close()) - }() - - actualD := hexPrint32LDedup(bufio.NewReaderSize(datFile, 1<<20)) - fmt.Println(actualD) - - require.EqualValues(t, expect32Null, actualD) - require.Equal(t, "baga6ea4seaqao7s73y24kcutaosvacpdjgfe5pw76ooefnyqw4ynr3d2y6x2mpq", commd.String()) -} - -func hexPrint32LDedup(r io.Reader) string { - var prevLine []byte - var outStr string - var duplicateLine bool - buffer := make([]byte, 32) - offset := 0 - - for { - n, err := r.Read(buffer) - if err == io.EOF { - break - } - if err != 
nil { - // Handle the error according to your application's requirements - fmt.Println("Error reading:", err) - break - } - - if string(prevLine) == string(buffer) { - // Mark as duplicate and skip processing - duplicateLine = true - } else { - if duplicateLine { - // Output a marker for the previous duplicate line - outStr += "*\n" - duplicateLine = false - } - // Convert to hex and output - outStr += fmt.Sprintf("%08x: %s\n", offset, toHex(buffer)) - - // Update prevLine - if len(prevLine) != 32 { - prevLine = make([]byte, 32) - } - copy(prevLine, buffer) - } - - offset += n - } - - // If the last line was a duplicate, ensure we mark it - if duplicateLine { - outStr += "*\n" - } - - return outStr -} - -func toHex(data []byte) string { - var hexStr string - for _, b := range data { - hexStr += fmt.Sprintf("%02x ", b) - } - return hexStr -} - -func BenchmarkHashChunk(b *testing.B) { - const benchSize = 1024 * 1024 - - // Generate 1 MiB of random data - randomData := make([]byte, benchSize) - if _, err := rand.Read(randomData); err != nil { - b.Fatalf("Failed to generate random data: %v", err) - } - - // Prepare data structure for hashChunk - data := make([][]byte, 1) - data[0] = randomData - - // append levels until we get to a 32 byte level - for len(data[len(data)-1]) > 32 { - newLevel := make([]byte, len(data[len(data)-1])/2) - data = append(data, newLevel) - } - - b.SetBytes(benchSize) // Set the number of bytes for the benchmark - - b.ResetTimer() // Start the timer after setup - - for i := 0; i < b.N; i++ { - hashChunk(data) - // Use the result in some way to avoid compiler optimization - _ = data[1] - } -} - -func BenchmarkBuildTreeD512M(b *testing.B) { - const dataSize = 512 * 1024 * 1024 // 512 MiB - - // Generate 512 MiB of random data - data := make([]byte, dataSize) - if _, err := rand.Read(data); err != nil { - b.Fatalf("Failed to generate random data: %v", err) - } - - // preallocate NumCPU+1 1MiB/512k/256k/... 
- // with Pool.Get / Pool.Put, so that they are in the pool - { - nc := runtime.NumCPU() - bufs := [][]byte{} - for i := 0; i < nc+1; i++ { - for sz := 1 << 20; sz > 32; sz >>= 1 { - b := pool.Get(sz) - bufs = append(bufs, b) - } - } - for _, b := range bufs { - pool.Put(b) - } - } - - /*if b.N == 1 { - b.N = 10 - }*/ - - b.SetBytes(int64(dataSize)) // Set the number of bytes for the benchmark - - for i := 0; i < b.N; i++ { - // Create a temporary file for each iteration - tempFile, err := os.CreateTemp("", "tree.dat") - if err != nil { - b.Fatalf("Failed to create temporary file: %v", err) - } - tempFilePath := tempFile.Name() - err = tempFile.Close() - if err != nil { - b.Fatalf("Failed to close temporary file: %v", err) - } - - b.StartTimer() // Start the timer for the BuildTreeD operation - - _, err = BuildTreeD(bytes.NewReader(data), false, tempFilePath, dataSize) - if err != nil { - b.Fatalf("BuildTreeD failed: %v", err) - } - - b.StopTimer() // Stop the timer after BuildTreeD completes - - // Clean up the temporary file - err = os.Remove(tempFilePath) - if err != nil { - b.Fatalf("Failed to remove temporary file: %v", err) - } - } -} - -func TestLayerOffset(t *testing.T) { - { - size := uint64(2048) - - require.Equal(t, uint64(0), layerOffset(size, 0)) - require.Equal(t, size, layerOffset(size, 1)) - require.Equal(t, size+(size/2), layerOffset(size, 2)) - require.Equal(t, size+(size/2)+(size/4), layerOffset(size, 3)) - require.Equal(t, size+(size/2)+(size/4)+(size/8), layerOffset(size, 4)) - require.Equal(t, size+(size/2)+(size/4)+(size/8)+(size/16), layerOffset(size, 5)) - } - - { - size := uint64(32 << 30) - maxLayers := 30 - - for i := 0; i <= maxLayers; i++ { - var expect uint64 - for j := 0; j < i; j++ { - expect += size >> uint64(j) - } - - fmt.Printf("layer %d: %d\n", i, expect) - require.Equal(t, expect, layerOffset(size, i)) - } - } - - { - size := uint64(64 << 30) - maxLayers := 31 - - for i := 0; i <= maxLayers; i++ { - var expect uint64 - for j 
:= 0; j < i; j++ { - expect += size >> uint64(j) - } - - fmt.Printf("layer %d: %d\n", i, expect) - require.Equal(t, expect, layerOffset(size, i)) - } - } -} diff --git a/curiosrc/seal/README.md b/curiosrc/seal/README.md deleted file mode 100644 index b148e4204d1..00000000000 --- a/curiosrc/seal/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Curio Sealer - -## Overview - -The Curio sealer is a collection of harmony tasks and a common poller -which implement the sealing functionality of the Filecoin protocol. - -## Pipeline Tasks - -* SDR pipeline - * `SDR` - Generate SDR layers - * `SDRTrees` - Generate tree files (TreeD, TreeR, TreeC) - * `PreCommitSubmit` - Submit precommit message to the network - * `PoRep` - Generate PoRep proof - * `CommitSubmit` - Submit commit message to the network - -# Poller - -The poller is a background process running on every node which runs any of the -SDR pipeline tasks. It periodically checks the state of sectors in the SDR pipeline -and schedules any tasks to run which will move the sector along the pipeline. - -# Error Handling - -* Pipeline tasks are expected to always finish successfully as harmonytask tasks. - If a sealing task encounters an error, it should mark the sector pipeline entry - as failed and exit without erroring. The poller will then figure out a recovery - strategy for the sector. 
diff --git a/curiosrc/seal/finalize_pieces.go b/curiosrc/seal/finalize_pieces.go deleted file mode 100644 index 354eed1413e..00000000000 --- a/curiosrc/seal/finalize_pieces.go +++ /dev/null @@ -1,51 +0,0 @@ -package seal - -import ( - "context" - "net/url" - "strconv" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -func DropSectorPieceRefs(ctx context.Context, db *harmonydb.DB, sid abi.SectorID) error { - //_, err := db.Exec(ctx, `SELECT FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, sid.Miner, sid.Number) - - var PieceURL []struct { - URL string `db:"data_url"` - } - - err := db.Select(ctx, &PieceURL, `SELECT data_url FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, sid.Miner, sid.Number) - if err != nil { - return xerrors.Errorf("getting piece url: %w", err) - } - - for _, pu := range PieceURL { - gourl, err := url.Parse(pu.URL) - if err != nil { - log.Errorw("failed to parse piece url", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number) - continue - } - - if gourl.Scheme == "pieceref" { - refID, err := strconv.ParseInt(gourl.Opaque, 10, 64) - if err != nil { - log.Errorw("failed to parse piece ref id", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number) - continue - } - - n, err := db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) - if err != nil { - log.Errorw("failed to delete piece ref", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number) - } - - log.Debugw("deleted piece ref", "url", pu.URL, "miner", sid.Miner, "sector", sid.Number, "rows", n) - } - } - - return err -} diff --git a/curiosrc/seal/poller.go b/curiosrc/seal/poller.go deleted file mode 100644 index 568280bdbce..00000000000 --- a/curiosrc/seal/poller.go +++ /dev/null @@ -1,285 +0,0 @@ -package seal - -import ( - "context" - "time" - - logging "github.com/ipfs/go-log/v2" 
- "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/promise" -) - -var log = logging.Logger("lpseal") - -const ( - pollerSDR = iota - pollerTrees - pollerPrecommitMsg - pollerPoRep - pollerCommitMsg - pollerFinalize - pollerMoveStorage - - numPollers -) - -const sealPollerInterval = 10 * time.Second -const seedEpochConfidence = 3 - -type SealPollerAPI interface { - StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) - StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) - ChainHead(context.Context) (*types.TipSet, error) -} - -type SealPoller struct { - db *harmonydb.DB - api SealPollerAPI - - pollers [numPollers]promise.Promise[harmonytask.AddTaskFunc] -} - -func NewPoller(db *harmonydb.DB, api SealPollerAPI) *SealPoller { - return &SealPoller{ - db: db, - api: api, - } -} - -func (s *SealPoller) RunPoller(ctx context.Context) { - ticker := time.NewTicker(sealPollerInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if err := s.poll(ctx); err != nil { - log.Errorw("polling failed", "error", err) - } - } - } -} - -/* -NOTE: TaskIDs are ONLY set while the tasks are executing or waiting to execute. 
- This means that there are ~4 states each task can be in: -* Not run, and dependencies not solved (dependencies are 'After' fields of previous stages), task is null, After is false -* Not run, and dependencies solved, task is null, After is false -* Running or queued, task is set, After is false -* Finished, task is null, After is true -*/ - -type pollTask struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - - TaskSDR *int64 `db:"task_id_sdr"` - AfterSDR bool `db:"after_sdr"` - - TaskTreeD *int64 `db:"task_id_tree_d"` - AfterTreeD bool `db:"after_tree_d"` - - TaskTreeC *int64 `db:"task_id_tree_c"` - AfterTreeC bool `db:"after_tree_c"` - - TaskTreeR *int64 `db:"task_id_tree_r"` - AfterTreeR bool `db:"after_tree_r"` - - TaskPrecommitMsg *int64 `db:"task_id_precommit_msg"` - AfterPrecommitMsg bool `db:"after_precommit_msg"` - - AfterPrecommitMsgSuccess bool `db:"after_precommit_msg_success"` - SeedEpoch *int64 `db:"seed_epoch"` - - TaskPoRep *int64 `db:"task_id_porep"` - PoRepProof []byte `db:"porep_proof"` - AfterPoRep bool `db:"after_porep"` - - TaskFinalize *int64 `db:"task_id_finalize"` - AfterFinalize bool `db:"after_finalize"` - - TaskMoveStorage *int64 `db:"task_id_move_storage"` - AfterMoveStorage bool `db:"after_move_storage"` - - TaskCommitMsg *int64 `db:"task_id_commit_msg"` - AfterCommitMsg bool `db:"after_commit_msg"` - - AfterCommitMsgSuccess bool `db:"after_commit_msg_success"` - - Failed bool `db:"failed"` - FailedReason string `db:"failed_reason"` -} - -func (s *SealPoller) poll(ctx context.Context) error { - var tasks []pollTask - - err := s.db.Select(ctx, &tasks, `SELECT - sp_id, sector_number, - task_id_sdr, after_sdr, - task_id_tree_d, after_tree_d, - task_id_tree_c, after_tree_c, - task_id_tree_r, after_tree_r, - task_id_precommit_msg, after_precommit_msg, - after_precommit_msg_success, seed_epoch, - task_id_porep, porep_proof, after_porep, - task_id_finalize, after_finalize, - task_id_move_storage, after_move_storage, 
- task_id_commit_msg, after_commit_msg, - after_commit_msg_success, - failed, failed_reason - FROM sectors_sdr_pipeline WHERE after_commit_msg_success != TRUE OR after_move_storage != TRUE`) - if err != nil { - return err - } - - for _, task := range tasks { - task := task - if task.Failed { - continue - } - - ts, err := s.api.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("getting chain head: %w", err) - } - - s.pollStartSDR(ctx, task) - s.pollStartSDRTrees(ctx, task) - s.pollStartPrecommitMsg(ctx, task) - s.mustPoll(s.pollPrecommitMsgLanded(ctx, task)) - s.pollStartPoRep(ctx, task, ts) - s.pollStartFinalize(ctx, task, ts) - s.pollStartMoveStorage(ctx, task) - s.pollStartCommitMsg(ctx, task) - s.mustPoll(s.pollCommitMsgLanded(ctx, task)) - } - - return nil -} - -func (s *SealPoller) pollStartSDR(ctx context.Context, task pollTask) { - if !task.AfterSDR && task.TaskSDR == nil && s.pollers[pollerSDR].IsSet() { - s.pollers[pollerSDR].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_sdr = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_sdr IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterSDR() bool { - return t.AfterSDR -} - -func (s *SealPoller) pollStartSDRTrees(ctx context.Context, task pollTask) { - if !task.AfterTreeD && !task.AfterTreeC && !task.AfterTreeR && - task.TaskTreeD == nil && task.TaskTreeC == nil && task.TaskTreeR == nil && - s.pollers[pollerTrees].IsSet() && task.AfterSDR { - - s.pollers[pollerTrees].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_tree_d = $1, task_id_tree_c = $1, 
task_id_tree_r = $1 - WHERE sp_id = $2 AND sector_number = $3 AND after_sdr = TRUE AND task_id_tree_d IS NULL AND task_id_tree_c IS NULL AND task_id_tree_r IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterTrees() bool { - return t.AfterTreeD && t.AfterTreeC && t.AfterTreeR && t.afterSDR() -} - -func (t pollTask) afterPrecommitMsg() bool { - return t.AfterPrecommitMsg && t.afterTrees() -} - -func (t pollTask) afterPrecommitMsgSuccess() bool { - return t.AfterPrecommitMsgSuccess && t.afterPrecommitMsg() -} - -func (s *SealPoller) pollStartPoRep(ctx context.Context, task pollTask, ts *types.TipSet) { - if s.pollers[pollerPoRep].IsSet() && task.afterPrecommitMsgSuccess() && task.SeedEpoch != nil && - task.TaskPoRep == nil && !task.AfterPoRep && - ts.Height() >= abi.ChainEpoch(*task.SeedEpoch+seedEpochConfidence) { - - s.pollers[pollerPoRep].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_porep = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_porep IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterPoRep() bool { - return t.AfterPoRep && t.afterPrecommitMsgSuccess() -} - -func (s *SealPoller) pollStartFinalize(ctx context.Context, task pollTask, ts *types.TipSet) { - if s.pollers[pollerFinalize].IsSet() && task.afterPoRep() && !task.AfterFinalize && task.TaskFinalize == nil { - s.pollers[pollerFinalize].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, 
seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_finalize = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_finalize IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterFinalize() bool { - return t.AfterFinalize && t.afterPoRep() -} - -func (s *SealPoller) pollStartMoveStorage(ctx context.Context, task pollTask) { - if s.pollers[pollerMoveStorage].IsSet() && task.afterFinalize() && !task.AfterMoveStorage && task.TaskMoveStorage == nil { - s.pollers[pollerMoveStorage].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_move_storage = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_move_storage IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (s *SealPoller) mustPoll(err error) { - if err != nil { - log.Errorw("poller operation failed", "error", err) - } -} diff --git a/curiosrc/seal/poller_commit_msg.go b/curiosrc/seal/poller_commit_msg.go deleted file mode 100644 index 9a88129b04e..00000000000 --- a/curiosrc/seal/poller_commit_msg.go +++ /dev/null @@ -1,108 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" -) - -func (s 
*SealPoller) pollStartCommitMsg(ctx context.Context, task pollTask) { - if task.afterPoRep() && len(task.PoRepProof) > 0 && task.TaskCommitMsg == nil && !task.AfterCommitMsg && s.pollers[pollerCommitMsg].IsSet() { - s.pollers[pollerCommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_commit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_commit_msg IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (s *SealPoller) pollCommitMsgLanded(ctx context.Context, task pollTask) error { - if task.AfterCommitMsg && !task.AfterCommitMsgSuccess && s.pollers[pollerCommitMsg].IsSet() { - var execResult []dbExecResult - - err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, spipeline.commit_msg_cid, executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used - FROM sectors_sdr_pipeline spipeline - JOIN message_waits ON spipeline.commit_msg_cid = message_waits.signed_message_cid - WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber) - if err != nil { - log.Errorw("failed to query message_waits", "error", err) - } - - if len(execResult) > 0 { - maddr, err := address.NewIDAddress(uint64(task.SpID)) - if err != nil { - return err - } - - if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok { - return s.pollCommitMsgFail(ctx, task, execResult[0]) - } - - si, err := s.api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("get sector info: %w", err) - } - - if si == nil { - log.Errorw("todo handle missing sector info (not found after 
cron)", "sp", task.SpID, "sector", task.SectorNumber, "exec_epoch", execResult[0].ExecutedTskEpoch, "exec_tskcid", execResult[0].ExecutedTskCID, "msg_cid", execResult[0].ExecutedMsgCID) - // todo handdle missing sector info (not found after cron) - } else { - // yay! - - _, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - after_commit_msg_success = TRUE, commit_msg_tsk = $1 - WHERE sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`, - execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - } - } - } - - return nil -} - -func (s *SealPoller) pollCommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error { - switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) { - case exitcode.SysErrInsufficientFunds: - fallthrough - case exitcode.SysErrOutOfGas: - // just retry - return s.pollRetryCommitMsgSend(ctx, task, execResult) - default: - return xerrors.Errorf("commit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode)) - } -} - -func (s *SealPoller) pollRetryCommitMsgSend(ctx context.Context, task pollTask, execResult dbExecResult) error { - if execResult.CommitMsgCID == nil { - return xerrors.Errorf("commit msg cid was nil") - } - - // make the pipeline entry seem like precommit send didn't happen, next poll loop will retry - - _, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - commit_msg_cid = NULL, task_id_commit_msg = NULL, after_commit_msg = FALSE - WHERE commit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`, - *execResult.CommitMsgCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline to retry precommit msg send: %w", err) - } - - return nil -} diff --git a/curiosrc/seal/poller_precommit_msg.go b/curiosrc/seal/poller_precommit_msg.go deleted file mode 100644 index 4372cbb9223..00000000000 
--- a/curiosrc/seal/poller_precommit_msg.go +++ /dev/null @@ -1,119 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" -) - -func (s *SealPoller) pollStartPrecommitMsg(ctx context.Context, task pollTask) { - if task.TaskPrecommitMsg == nil && !task.AfterPrecommitMsg && task.afterTrees() && s.pollers[pollerPrecommitMsg].IsSet() { - s.pollers[pollerPrecommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_precommit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_precommit_msg IS NULL AND after_tree_r = TRUE AND after_tree_d = TRUE`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -type dbExecResult struct { - PrecommitMsgCID *string `db:"precommit_msg_cid"` - CommitMsgCID *string `db:"commit_msg_cid"` - - ExecutedTskCID string `db:"executed_tsk_cid"` - ExecutedTskEpoch int64 `db:"executed_tsk_epoch"` - ExecutedMsgCID string `db:"executed_msg_cid"` - - ExecutedRcptExitCode int64 `db:"executed_rcpt_exitcode"` - ExecutedRcptGasUsed int64 `db:"executed_rcpt_gas_used"` -} - -func (s *SealPoller) pollPrecommitMsgLanded(ctx context.Context, task pollTask) error { - if task.AfterPrecommitMsg && !task.AfterPrecommitMsgSuccess { - var execResult []dbExecResult - - err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, 
spipeline.commit_msg_cid, executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used - FROM sectors_sdr_pipeline spipeline - JOIN message_waits ON spipeline.precommit_msg_cid = message_waits.signed_message_cid - WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber) - if err != nil { - log.Errorw("failed to query message_waits", "error", err) - } - - if len(execResult) > 0 { - if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok { - return s.pollPrecommitMsgFail(ctx, task, execResult[0]) - } - - maddr, err := address.NewIDAddress(uint64(task.SpID)) - if err != nil { - return err - } - - pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("get precommit info: %w", err) - } - - if pci != nil { - randHeight := pci.PreCommitEpoch + policy.GetPreCommitChallengeDelay() - - _, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - seed_epoch = $1, precommit_msg_tsk = $2, after_precommit_msg_success = TRUE - WHERE sp_id = $3 AND sector_number = $4 AND seed_epoch IS NULL`, - randHeight, execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - } // todo handle missing precommit info (eg expired precommit) - - } - } - - return nil -} - -func (s *SealPoller) pollPrecommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error { - switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) { - case exitcode.SysErrInsufficientFunds: - fallthrough - case exitcode.SysErrOutOfGas: - // just retry - return s.pollRetryPrecommitMsgSend(ctx, task, execResult) - default: - return xerrors.Errorf("precommit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode)) - } -} - -func (s *SealPoller) pollRetryPrecommitMsgSend(ctx context.Context, 
task pollTask, execResult dbExecResult) error { - if execResult.PrecommitMsgCID == nil { - return xerrors.Errorf("precommit msg cid was nil") - } - - // make the pipeline entry seem like precommit send didn't happen, next poll loop will retry - - _, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - precommit_msg_cid = NULL, task_id_precommit_msg = NULL, after_precommit_msg = FALSE - WHERE precommit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_precommit_msg_success = FALSE`, - *execResult.PrecommitMsgCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline to retry precommit msg send: %w", err) - } - - return nil -} diff --git a/curiosrc/seal/sector_num_alloc.go b/curiosrc/seal/sector_num_alloc.go deleted file mode 100644 index 010ebee395d..00000000000 --- a/curiosrc/seal/sector_num_alloc.go +++ /dev/null @@ -1,127 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - rlepluslazy "github.com/filecoin-project/go-bitfield/rle" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -type AllocAPI interface { - StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) -} - -func AllocateSectorNumbers(ctx context.Context, a AllocAPI, db *harmonydb.DB, maddr address.Address, count int, txcb ...func(*harmonydb.Tx, []abi.SectorNumber) (bool, error)) ([]abi.SectorNumber, error) { - chainAlloc, err := a.StateMinerAllocated(ctx, maddr, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("getting on-chain allocated sector numbers: %w", err) - } - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return nil, xerrors.Errorf("getting miner id: %w", err) - } - - var res []abi.SectorNumber - - comm, err := 
db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - res = nil // reset result in case of retry - - // query from db, if exists unmarsal to bitfield - var dbAllocated bitfield.BitField - var rawJson []byte - - err = tx.QueryRow("SELECT COALESCE(allocated, '[0]') from sectors_allocated_numbers sa FULL OUTER JOIN (SELECT 1) AS d ON TRUE WHERE sp_id = $1 OR sp_id IS NULL", mid).Scan(&rawJson) - if err != nil { - return false, xerrors.Errorf("querying allocated sector numbers: %w", err) - } - - if rawJson != nil { - err = dbAllocated.UnmarshalJSON(rawJson) - if err != nil { - return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) - } - } - - if err := dbAllocated.UnmarshalJSON(rawJson); err != nil { - return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) - } - - merged, err := bitfield.MergeBitFields(*chainAlloc, dbAllocated) - if err != nil { - return false, xerrors.Errorf("merging allocated sector numbers: %w", err) - } - - allAssignable, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{ - { - Val: true, - Len: abi.MaxSectorNumber, - }, - }}) - if err != nil { - return false, xerrors.Errorf("creating assignable sector numbers: %w", err) - } - - inverted, err := bitfield.SubtractBitField(allAssignable, merged) - if err != nil { - return false, xerrors.Errorf("subtracting allocated sector numbers: %w", err) - } - - toAlloc, err := inverted.Slice(0, uint64(count)) - if err != nil { - return false, xerrors.Errorf("getting slice of allocated sector numbers: %w", err) - } - - err = toAlloc.ForEach(func(u uint64) error { - res = append(res, abi.SectorNumber(u)) - return nil - }) - if err != nil { - return false, xerrors.Errorf("iterating allocated sector numbers: %w", err) - } - - toPersist, err := bitfield.MergeBitFields(merged, toAlloc) - if err != nil { - return false, xerrors.Errorf("merging allocated sector numbers: %w", err) - } - - rawJson, err = 
toPersist.MarshalJSON() - if err != nil { - return false, xerrors.Errorf("marshaling allocated sector numbers: %w", err) - } - - _, err = tx.Exec("INSERT INTO sectors_allocated_numbers(sp_id, allocated) VALUES($1, $2) ON CONFLICT(sp_id) DO UPDATE SET allocated = $2", mid, rawJson) - if err != nil { - return false, xerrors.Errorf("persisting allocated sector numbers: %w", err) - } - - for i, f := range txcb { - commit, err = f(tx, res) - if err != nil { - return false, xerrors.Errorf("executing tx callback %d: %w", i, err) - } - - if !commit { - return false, nil - } - } - - return true, nil - }, harmonydb.OptionRetry()) - - if err != nil { - return nil, xerrors.Errorf("allocating sector numbers: %w", err) - } - if !comm { - return nil, xerrors.Errorf("allocating sector numbers: commit failed") - } - - return res, nil -} diff --git a/curiosrc/seal/task_finalize.go b/curiosrc/seal/task_finalize.go deleted file mode 100644 index 2b362d7bead..00000000000 --- a/curiosrc/seal/task_finalize.go +++ /dev/null @@ -1,156 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type FinalizeTask struct { - max int - sp *SealPoller - sc *ffi.SealCalls - db *harmonydb.DB -} - -func NewFinalizeTask(max int, sp *SealPoller, sc *ffi.SealCalls, db *harmonydb.DB) *FinalizeTask { - return &FinalizeTask{ - max: max, - sp: sp, - sc: sc, - db: db, - } -} - -func (f *FinalizeTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - var tasks []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof int64 `db:"reg_seal_proof"` - } - - ctx := 
context.Background() - - err = f.db.Select(ctx, &tasks, ` - SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_finalize = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting task: %w", err) - } - - if len(tasks) != 1 { - return false, xerrors.Errorf("expected one task") - } - task := tasks[0] - - var keepUnsealed bool - - if err := f.db.QueryRow(ctx, `SELECT COALESCE(BOOL_OR(NOT data_delete_on_finalize), FALSE) FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, task.SpID, task.SectorNumber).Scan(&keepUnsealed); err != nil { - return false, err - } - - sector := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(task.SpID), - Number: abi.SectorNumber(task.SectorNumber), - }, - ProofType: abi.RegisteredSealProof(task.RegSealProof), - } - - err = f.sc.FinalizeSector(ctx, sector, keepUnsealed) - if err != nil { - return false, xerrors.Errorf("finalizing sector: %w", err) - } - - if err := DropSectorPieceRefs(ctx, f.db, sector.ID); err != nil { - return false, xerrors.Errorf("dropping sector piece refs: %w", err) - } - - // set after_finalize - _, err = f.db.Exec(ctx, `update sectors_sdr_pipeline set after_finalize=true where task_id_finalize=$1`, taskID) - if err != nil { - return false, xerrors.Errorf("updating task: %w", err) - } - - return true, nil -} - -func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - var tasks []struct { - TaskID harmonytask.TaskID `db:"task_id_finalize"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - StorageID string `db:"storage_id"` - } - - if storiface.FTCache != 4 { - panic("storiface.FTCache != 4") - } - - ctx := context.Background() - - indIDs := make([]int64, len(ids)) - for i, id := range ids { - indIDs[i] = int64(id) - } - - err := f.db.Select(ctx, &tasks, ` - SELECT p.task_id_finalize, p.sp_id, p.sector_number, l.storage_id FROM sectors_sdr_pipeline p - 
INNER JOIN sector_location l ON p.sp_id = l.miner_id AND p.sector_number = l.sector_num - WHERE task_id_finalize = ANY ($1) AND l.sector_filetype = 4 -`, indIDs) - if err != nil { - return nil, xerrors.Errorf("getting tasks: %w", err) - } - - ls, err := f.sc.LocalStorage(ctx) - if err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - - acceptables := map[harmonytask.TaskID]bool{} - - for _, t := range ids { - acceptables[t] = true - } - - for _, t := range tasks { - if _, ok := acceptables[t.TaskID]; !ok { - continue - } - - for _, l := range ls { - if string(l.ID) == t.StorageID { - return &t.TaskID, nil - } - } - } - - return nil, nil -} - -func (f *FinalizeTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: f.max, - Name: "Finalize", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 100 << 20, - }, - MaxFailures: 10, - } -} - -func (f *FinalizeTask) Adder(taskFunc harmonytask.AddTaskFunc) { - f.sp.pollers[pollerFinalize].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &FinalizeTask{} diff --git a/curiosrc/seal/task_movestorage.go b/curiosrc/seal/task_movestorage.go deleted file mode 100644 index 6037a390dc7..00000000000 --- a/curiosrc/seal/task_movestorage.go +++ /dev/null @@ -1,176 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type MoveStorageTask struct { - sp *SealPoller - sc *ffi.SealCalls - db *harmonydb.DB - - max int -} - -func NewMoveStorageTask(sp *SealPoller, sc *ffi.SealCalls, db *harmonydb.DB, max int) *MoveStorageTask { - return &MoveStorageTask{ - max: max, - sp: sp, - sc: sc, - db: db, - } 
-} - -func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - var tasks []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof int64 `db:"reg_seal_proof"` - } - - ctx := context.Background() - - err = m.db.Select(ctx, &tasks, ` - SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_move_storage = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting task: %w", err) - } - if len(tasks) != 1 { - return false, xerrors.Errorf("expected one task") - } - task := tasks[0] - - sector := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(task.SpID), - Number: abi.SectorNumber(task.SectorNumber), - }, - ProofType: abi.RegisteredSealProof(task.RegSealProof), - } - - err = m.sc.MoveStorage(ctx, sector, &taskID) - if err != nil { - return false, xerrors.Errorf("moving storage: %w", err) - } - - _, err = m.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET after_move_storage = true WHERE task_id_move_storage = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("updating task: %w", err) - } - - return true, nil -} - -func (m *MoveStorageTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - - ctx := context.Background() - /* - - var tasks []struct { - TaskID harmonytask.TaskID `db:"task_id_finalize"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - StorageID string `db:"storage_id"` - } - - indIDs := make([]int64, len(ids)) - for i, id := range ids { - indIDs[i] = int64(id) - } - err := m.db.Select(ctx, &tasks, ` - select p.task_id_move_storage, p.sp_id, p.sector_number, l.storage_id from sectors_sdr_pipeline p - inner join sector_location l on p.sp_id=l.miner_id and p.sector_number=l.sector_num - where task_id_move_storage in ($1) and l.sector_filetype=4`, indIDs) - if err != nil { - return nil, xerrors.Errorf("getting tasks: %w", err) - } - - ls, err := 
m.sc.LocalStorage(ctx) - if err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - - acceptables := map[harmonytask.TaskID]bool{} - - for _, t := range ids { - acceptables[t] = true - } - - for _, t := range tasks { - - } - - todo some smarts - * yield a schedule cycle/s if we have moves already in progress - */ - - //// - ls, err := m.sc.LocalStorage(ctx) - if err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - var haveStorage bool - for _, l := range ls { - if l.CanStore { - haveStorage = true - break - } - } - - if !haveStorage { - return nil, nil - } - - id := ids[0] - return &id, nil -} - -func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails { - ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size - if isDevnet { - ssize = abi.SectorSize(2 << 20) - } - - return harmonytask.TaskTypeDetails{ - Max: m.max, - Name: "MoveStorage", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 128 << 20, - Storage: m.sc.Storage(m.taskToSector, storiface.FTNone, storiface.FTCache|storiface.FTSealed|storiface.FTUnsealed, ssize, storiface.PathStorage), - }, - MaxFailures: 10, - } -} - -func (m *MoveStorageTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { - var refs []ffi.SectorRef - - err := m.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_move_storage = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) - } - - if len(refs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) - } - - return refs[0], nil -} - -func (m *MoveStorageTask) Adder(taskFunc harmonytask.AddTaskFunc) { - m.sp.pollers[pollerMoveStorage].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &MoveStorageTask{} diff --git a/curiosrc/seal/task_porep.go b/curiosrc/seal/task_porep.go deleted file mode 100644 index 
58e307bc020..00000000000 --- a/curiosrc/seal/task_porep.go +++ /dev/null @@ -1,216 +0,0 @@ -package seal - -import ( - "bytes" - "context" - "strings" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type PoRepAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - StateGetRandomnessFromBeacon(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error) -} - -type PoRepTask struct { - db *harmonydb.DB - api PoRepAPI - sp *SealPoller - sc *ffi.SealCalls - - max int -} - -func NewPoRepTask(db *harmonydb.DB, api PoRepAPI, sp *SealPoller, sc *ffi.SealCalls, maxPoRep int) *PoRepTask { - return &PoRepTask{ - db: db, - api: api, - sp: sp, - sc: sc, - max: maxPoRep, - } -} - -func (p *PoRepTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - TicketEpoch abi.ChainEpoch `db:"ticket_epoch"` - TicketValue []byte `db:"ticket_value"` - SeedEpoch abi.ChainEpoch `db:"seed_epoch"` - SealedCID string `db:"tree_r_cid"` - UnsealedCID string `db:"tree_d_cid"` - } - - err = p.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, ticket_value, seed_epoch, tree_r_cid, tree_d_cid - FROM sectors_sdr_pipeline - WHERE task_id_porep = $1`, taskID) - if err != nil { - 
return false, err - } - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - sealed, err := cid.Parse(sectorParams.SealedCID) - if err != nil { - return false, xerrors.Errorf("failed to parse sealed cid: %w", err) - } - - unsealed, err := cid.Parse(sectorParams.UnsealedCID) - if err != nil { - return false, xerrors.Errorf("failed to parse unsealed cid: %w", err) - } - - ts, err := p.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("failed to get chain head: %w", err) - } - - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("failed to create miner address: %w", err) - } - - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return false, xerrors.Errorf("failed to marshal miner address: %w", err) - } - - rand, err := p.api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, sectorParams.SeedEpoch, buf.Bytes(), ts.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get randomness for computing seal proof: %w", err) - } - - sr := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(sectorParams.SpID), - Number: abi.SectorNumber(sectorParams.SectorNumber), - }, - ProofType: sectorParams.RegSealProof, - } - - // COMPUTE THE PROOF! - - proof, err := p.sc.PoRepSnark(ctx, sr, sealed, unsealed, sectorParams.TicketValue, abi.InteractiveSealRandomness(rand)) - if err != nil { - end, err := p.recoverErrors(ctx, sectorParams.SpID, sectorParams.SectorNumber, err) - if err != nil { - return false, xerrors.Errorf("recover errors: %w", err) - } - if end { - // done, but the error handling has stored a different than success state - return true, nil - } - - return false, xerrors.Errorf("failed to compute seal proof: %w", err) - } - - // store success! 
- n, err := p.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_porep = TRUE, seed_value = $3, porep_proof = $4 - WHERE sp_id = $1 AND sector_number = $2`, - sectorParams.SpID, sectorParams.SectorNumber, []byte(rand), proof) - if err != nil { - return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store sdr success: updated %d rows", n) - } - - return true, nil -} - -func (p *PoRepTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - // todo sort by priority - - id := ids[0] - return &id, nil -} - -func (p *PoRepTask) TypeDetails() harmonytask.TaskTypeDetails { - res := harmonytask.TaskTypeDetails{ - Max: p.max, - Name: "PoRep", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 1, - Ram: 50 << 30, // todo correct value - MachineID: 0, - }, - MaxFailures: 5, - Follows: nil, - } - - if isDevnet { - res.Cost.Ram = 1 << 30 - } - - return res -} - -func (p *PoRepTask) Adder(taskFunc harmonytask.AddTaskFunc) { - p.sp.pollers[pollerPoRep].Set(taskFunc) -} - -func (p *PoRepTask) recoverErrors(ctx context.Context, spid, snum int64, cerr error) (end bool, err error) { - const ( - // rust-fil-proofs error strings - // https://github.com/filecoin-project/rust-fil-proofs/blob/3f018b51b6327b135830899d237a7ba181942d7e/storage-proofs-porep/src/stacked/vanilla/proof.rs#L454C1-L463 - errstrInvalidCommD = "Invalid comm_d detected at challenge_index" - errstrInvalidCommR = "Invalid comm_r detected at challenge_index" - errstrInvalidEncoding = "Invalid encoding proof generated at layer" - ) - - if cerr == nil { - return false, xerrors.Errorf("nil error") - } - - switch { - case strings.Contains(cerr.Error(), errstrInvalidCommD): - fallthrough - case strings.Contains(cerr.Error(), errstrInvalidCommR): - // todo: it might be more optimal to just retry the Trees compute first. 
- // Invalid CommD/R likely indicates a problem with the data computed in that step - // For now for simplicity just retry the whole thing - fallthrough - case strings.Contains(cerr.Error(), errstrInvalidEncoding): - n, err := p.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_porep = FALSE, after_sdr = FALSE, after_tree_d = FALSE, - after_tree_r = FALSE, after_tree_c = FALSE - WHERE sp_id = $1 AND sector_number = $2`, - spid, snum) - if err != nil { - return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store sdr success: updated %d rows", n) - } - - return true, nil - - default: - // if end is false the original error will be returned by the caller - return false, nil - } -} - -var _ harmonytask.TaskInterface = &PoRepTask{} diff --git a/curiosrc/seal/task_sdr.go b/curiosrc/seal/task_sdr.go deleted file mode 100644 index 4c1164e0581..00000000000 --- a/curiosrc/seal/task_sdr.go +++ /dev/null @@ -1,239 +0,0 @@ -package seal - -import ( - "bytes" - "context" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-commp-utils/nonffi" - "github.com/filecoin-project/go-commp-utils/zerocomm" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var isDevnet = build.BlockDelaySecs < 30 - -type SDRAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - StateGetRandomnessFromTickets(context.Context, 
crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error) -} - -type SDRTask struct { - api SDRAPI - db *harmonydb.DB - sp *SealPoller - - sc *ffi.SealCalls - - max int -} - -func NewSDRTask(api SDRAPI, db *harmonydb.DB, sp *SealPoller, sc *ffi.SealCalls, maxSDR int) *SDRTask { - return &SDRTask{ - api: api, - db: db, - sp: sp, - sc: sc, - max: maxSDR, - } -} - -func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - } - - err = s.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof - FROM sectors_sdr_pipeline - WHERE task_id_sdr = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - var pieces []struct { - PieceIndex int64 `db:"piece_index"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - } - - err = s.db.Select(ctx, &pieces, ` - SELECT piece_index, piece_cid, piece_size - FROM sectors_sdr_initial_pieces - WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("getting pieces: %w", err) - } - - ssize, err := sectorParams.RegSealProof.SectorSize() - if err != nil { - return false, xerrors.Errorf("getting sector size: %w", err) - } - - var commd cid.Cid - - if len(pieces) > 0 { - pieceInfos := make([]abi.PieceInfo, len(pieces)) - for i, p := range pieces { - c, err := cid.Parse(p.PieceCID) - if err != nil { - return false, xerrors.Errorf("parsing piece cid: %w", err) - } - - pieceInfos[i] = abi.PieceInfo{ - Size: 
abi.PaddedPieceSize(p.PieceSize), - PieceCID: c, - } - } - - commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos) - if err != nil { - return false, xerrors.Errorf("computing CommD: %w", err) - } - } else { - commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) - } - - sref := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(sectorParams.SpID), - Number: abi.SectorNumber(sectorParams.SectorNumber), - }, - ProofType: sectorParams.RegSealProof, - } - - // get ticket - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("getting miner address: %w", err) - } - - // FAIL: api may be down - // FAIL-RESP: rely on harmony retry - ticket, ticketEpoch, err := s.getTicket(ctx, maddr) - if err != nil { - return false, xerrors.Errorf("getting ticket: %w", err) - } - - // do the SDR!! - - // FAIL: storage may not have enough space - // FAIL-RESP: rely on harmony retry - - // LATEFAIL: compute error in sdr - // LATEFAIL-RESP: Check in Trees task should catch this; Will retry computing - // Trees; After one retry, it should return the sector to the - // SDR stage; max number of retries should be configurable - - err = s.sc.GenerateSDR(ctx, taskID, sref, ticket, commd) - if err != nil { - return false, xerrors.Errorf("generating sdr: %w", err) - } - - // store success! 
- n, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_sdr = true, ticket_epoch = $3, ticket_value = $4 - WHERE sp_id = $1 AND sector_number = $2`, - sectorParams.SpID, sectorParams.SectorNumber, ticketEpoch, []byte(ticket)) - if err != nil { - return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store sdr success: updated %d rows", n) - } - - return true, nil -} - -func (s *SDRTask) getTicket(ctx context.Context, maddr address.Address) (abi.SealRandomness, abi.ChainEpoch, error) { - ts, err := s.api.ChainHead(ctx) - if err != nil { - return nil, 0, xerrors.Errorf("getting chain head: %w", err) - } - - ticketEpoch := ts.Height() - policy.SealRandomnessLookback - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return nil, 0, xerrors.Errorf("marshaling miner address: %w", err) - } - - rand, err := s.api.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes(), ts.Key()) - if err != nil { - return nil, 0, xerrors.Errorf("getting randomness from tickets: %w", err) - } - - return abi.SealRandomness(rand), ticketEpoch, nil -} - -func (s *SDRTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (s *SDRTask) TypeDetails() harmonytask.TaskTypeDetails { - ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size - if isDevnet { - ssize = abi.SectorSize(2 << 20) - } - - res := harmonytask.TaskTypeDetails{ - Max: s.max, - Name: "SDR", - Cost: resources.Resources{ // todo offset for prefetch? 
- Cpu: 4, // todo multicore sdr - Gpu: 0, - Ram: 54 << 30, - Storage: s.sc.Storage(s.taskToSector, storiface.FTCache, storiface.FTNone, ssize, storiface.PathSealing), - }, - MaxFailures: 2, - Follows: nil, - } - - if isDevnet { - res.Cost.Ram = 1 << 30 - } - - return res -} - -func (s *SDRTask) Adder(taskFunc harmonytask.AddTaskFunc) { - s.sp.pollers[pollerSDR].Set(taskFunc) -} - -func (s *SDRTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { - var refs []ffi.SectorRef - - err := s.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_sdr = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) - } - - if len(refs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) - } - - return refs[0], nil -} - -var _ harmonytask.TaskInterface = &SDRTask{} diff --git a/curiosrc/seal/task_submit_commit.go b/curiosrc/seal/task_submit_commit.go deleted file mode 100644 index d7f133db71c..00000000000 --- a/curiosrc/seal/task_submit_commit.go +++ /dev/null @@ -1,178 +0,0 @@ -package seal - -import ( - "bytes" - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -type SubmitCommitAPI interface { - ChainHead(context.Context) 
(*types.TipSet, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error) - StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) - ctladdr.NodeApi -} - -type SubmitCommitTask struct { - sp *SealPoller - db *harmonydb.DB - api SubmitCommitAPI - - sender *message.Sender - as *multictladdr.MultiAddressSelector - - maxFee types.FIL -} - -func NewSubmitCommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitCommitAPI, sender *message.Sender, as *multictladdr.MultiAddressSelector, maxFee types.FIL) *SubmitCommitTask { - return &SubmitCommitTask{ - sp: sp, - db: db, - api: api, - sender: sender, - as: as, - - maxFee: maxFee, - } -} - -func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - Proof []byte `db:"porep_proof"` - } - - err = s.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, porep_proof - FROM sectors_sdr_pipeline - WHERE task_id_commit_msg = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("getting miner address: %w", err) - } - - params := miner.ProveCommitSectorParams{ - SectorNumber: abi.SectorNumber(sectorParams.SectorNumber), - Proof: sectorParams.Proof, - } - - enc := new(bytes.Buffer) - if err := params.MarshalCBOR(enc); err != nil { - return false, xerrors.Errorf("could not serialize 
commit params: %w", err) - } - - ts, err := s.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("getting chain head: %w", err) - } - - mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting miner info: %w", err) - } - - pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(sectorParams.SectorNumber), ts.Key()) - if err != nil { - return false, xerrors.Errorf("getting precommit info: %w", err) - } - if pci == nil { - return false, xerrors.Errorf("precommit info not found on chain") - } - - collateral, err := s.api.StateMinerInitialPledgeCollateral(ctx, maddr, pci.Info, ts.Key()) - if err != nil { - return false, xerrors.Errorf("getting initial pledge collateral: %w", err) - } - - collateral = big.Sub(collateral, pci.PreCommitDeposit) - if collateral.LessThan(big.Zero()) { - collateral = big.Zero() - } - - a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.CommitAddr, collateral, big.Zero()) - if err != nil { - return false, xerrors.Errorf("getting address for precommit: %w", err) - } - - msg := &types.Message{ - To: maddr, - From: a, - Method: builtin.MethodsMiner.ProveCommitSector, // todo ddo provecommit3 - Params: enc.Bytes(), - Value: collateral, // todo config for pulling from miner balance!! 
- } - - mss := &api.MessageSendSpec{ - MaxFee: abi.TokenAmount(s.maxFee), - } - - mcid, err := s.sender.Send(ctx, msg, mss, "commit") - if err != nil { - return false, xerrors.Errorf("pushing message to mpool: %w", err) - } - - _, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET commit_msg_cid = $1, after_commit_msg = TRUE WHERE sp_id = $2 AND sector_number = $3`, mcid, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("updating commit_msg_cid: %w", err) - } - - _, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid) - if err != nil { - return false, xerrors.Errorf("inserting into message_waits: %w", err) - } - - return true, nil -} - -func (s *SubmitCommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (s *SubmitCommitTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 128, - Name: "CommitSubmit", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 1 << 20, - }, - MaxFailures: 16, - } -} - -func (s *SubmitCommitTask) Adder(taskFunc harmonytask.AddTaskFunc) { - s.sp.pollers[pollerCommitMsg].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &SubmitCommitTask{} diff --git a/curiosrc/seal/task_submit_precommit.go b/curiosrc/seal/task_submit_precommit.go deleted file mode 100644 index d42bcbe0d6c..00000000000 --- a/curiosrc/seal/task_submit_precommit.go +++ /dev/null @@ -1,300 +0,0 @@ -package seal - -import ( - "bytes" - "context" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - actorstypes "github.com/filecoin-project/go-state-types/actors" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" - 
"github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -type SubmitPrecommitTaskApi interface { - ChainHead(context.Context) (*types.TipSet, error) - StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - ctladdr.NodeApi -} - -type SubmitPrecommitTask struct { - sp *SealPoller - db *harmonydb.DB - api SubmitPrecommitTaskApi - sender *message.Sender - as *multictladdr.MultiAddressSelector - - maxFee types.FIL -} - -func NewSubmitPrecommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitPrecommitTaskApi, sender *message.Sender, as *multictladdr.MultiAddressSelector, maxFee types.FIL) *SubmitPrecommitTask { - return &SubmitPrecommitTask{ - sp: sp, - db: db, - api: api, - sender: sender, - as: as, - - maxFee: maxFee, - } -} - -func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - // 1. 
Load sector info - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - TicketEpoch abi.ChainEpoch `db:"ticket_epoch"` - SealedCID string `db:"tree_r_cid"` - UnsealedCID string `db:"tree_d_cid"` - } - - err = s.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, tree_r_cid, tree_d_cid - FROM sectors_sdr_pipeline - WHERE task_id_precommit_msg = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("getting miner address: %w", err) - } - - sealedCID, err := cid.Parse(sectorParams.SealedCID) - if err != nil { - return false, xerrors.Errorf("parsing sealed CID: %w", err) - } - - unsealedCID, err := cid.Parse(sectorParams.UnsealedCID) - if err != nil { - return false, xerrors.Errorf("parsing unsealed CID: %w", err) - } - - // 2. 
Prepare message params - - head, err := s.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("getting chain head: %w", err) - } - - params := miner.PreCommitSectorBatchParams2{} - - expiration := sectorParams.TicketEpoch + miner12.MaxSectorExpirationExtension - - params.Sectors = append(params.Sectors, miner.SectorPreCommitInfo{ - SealProof: sectorParams.RegSealProof, - SectorNumber: abi.SectorNumber(sectorParams.SectorNumber), - SealedCID: sealedCID, - SealRandEpoch: sectorParams.TicketEpoch, - Expiration: expiration, - }) - - { - var pieces []struct { - PieceIndex int64 `db:"piece_index"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - - F05DealID int64 `db:"f05_deal_id"` - F05DealEndEpoch int64 `db:"f05_deal_end_epoch"` - F05DealStartEpoch int64 `db:"f05_deal_start_epoch"` - } - - err = s.db.Select(ctx, &pieces, ` - SELECT piece_index, piece_cid, piece_size, f05_deal_id, f05_deal_end_epoch, f05_deal_start_epoch - FROM sectors_sdr_initial_pieces - WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("getting pieces: %w", err) - } - - if len(pieces) > 1 { - return false, xerrors.Errorf("too many pieces") // todo support multiple pieces - } - - if len(pieces) > 0 { - params.Sectors[0].UnsealedCid = &unsealedCID - params.Sectors[0].Expiration = abi.ChainEpoch(pieces[0].F05DealEndEpoch) - - if abi.ChainEpoch(pieces[0].F05DealStartEpoch) < head.Height() { - // deal start epoch is in the past, can't precommit this sector anymore - _, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET failed = TRUE, failed_at = NOW(), failed_reason = 'past-start-epoch', failed_reason_msg = 'precommit: start epoch is in the past' - WHERE task_id_precommit_msg = $1`, taskID) - if perr != nil { - return false, xerrors.Errorf("persisting precommit start epoch expiry: %w", perr) - } - return true, xerrors.Errorf("deal start epoch is in 
the past") - } - - for _, p := range pieces { - params.Sectors[0].DealIDs = append(params.Sectors[0].DealIDs, abi.DealID(p.F05DealID)) - } - } - } - - nv, err := s.api.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting network version: %w", err) - } - av, err := actorstypes.VersionForNetwork(nv) - if err != nil { - return false, xerrors.Errorf("failed to get actors version: %w", err) - } - msd, err := policy.GetMaxProveCommitDuration(av, sectorParams.RegSealProof) - if err != nil { - return false, xerrors.Errorf("failed to get max prove commit duration: %w", err) - } - - if minExpiration := sectorParams.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; params.Sectors[0].Expiration < minExpiration { - params.Sectors[0].Expiration = minExpiration - } - - // 3. Check precommit - - { - record, err := s.checkPrecommit(ctx, params) - if err != nil { - if record { - _, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET failed = TRUE, failed_at = NOW(), failed_reason = 'precommit-check', failed_reason_msg = $1 - WHERE task_id_precommit_msg = $2`, err.Error(), taskID) - if perr != nil { - return false, xerrors.Errorf("persisting precommit check error: %w", perr) - } - } - - return record, xerrors.Errorf("checking precommit: %w", err) - } - } - - // 4. 
Prepare and send message - - var pbuf bytes.Buffer - if err := params.MarshalCBOR(&pbuf); err != nil { - return false, xerrors.Errorf("serializing params: %w", err) - } - - collateral, err := s.api.StateMinerPreCommitDepositForPower(ctx, maddr, params.Sectors[0], types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting precommit deposit: %w", err) - } - - mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting miner info: %w", err) - } - - a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.PreCommitAddr, collateral, big.Zero()) - if err != nil { - return false, xerrors.Errorf("getting address for precommit: %w", err) - } - - msg := &types.Message{ - To: maddr, - From: a, - Method: builtin.MethodsMiner.PreCommitSectorBatch2, - Params: pbuf.Bytes(), - Value: collateral, // todo config for pulling from miner balance!! - } - - mss := &api.MessageSendSpec{ - MaxFee: abi.TokenAmount(s.maxFee), - } - - mcid, err := s.sender.Send(ctx, msg, mss, "precommit") - if err != nil { - return false, xerrors.Errorf("sending message: %w", err) - } - - // set precommit_msg_cid - _, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET precommit_msg_cid = $1, after_precommit_msg = TRUE - WHERE task_id_precommit_msg = $2`, mcid, taskID) - if err != nil { - return false, xerrors.Errorf("updating precommit_msg_cid: %w", err) - } - - _, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid) - if err != nil { - return false, xerrors.Errorf("inserting into message_waits: %w", err) - } - - return true, nil -} - -func (s *SubmitPrecommitTask) checkPrecommit(ctx context.Context, params miner.PreCommitSectorBatchParams2) (record bool, err error) { - if len(params.Sectors) != 1 { - return false, xerrors.Errorf("expected 1 sector") - } - - preCommitInfo := params.Sectors[0] - - head, err := s.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("getting chain head: 
%w", err) - } - height := head.Height() - - //never commit P2 message before, check ticket expiration - ticketEarliest := height - policy.MaxPreCommitRandomnessLookback - - if preCommitInfo.SealRandEpoch < ticketEarliest { - return true, xerrors.Errorf("ticket expired: seal height: %d, head: %d", preCommitInfo.SealRandEpoch+policy.SealRandomnessLookback, height) - } - - return true, nil -} - -func (s *SubmitPrecommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (s *SubmitPrecommitTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 1024, - Name: "PreCommitSubmit", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 1 << 20, - }, - MaxFailures: 16, - } -} - -func (s *SubmitPrecommitTask) Adder(taskFunc harmonytask.AddTaskFunc) { - s.sp.pollers[pollerPrecommitMsg].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &SubmitPrecommitTask{} diff --git a/curiosrc/seal/task_trees.go b/curiosrc/seal/task_trees.go deleted file mode 100644 index 7994c354aad..00000000000 --- a/curiosrc/seal/task_trees.go +++ /dev/null @@ -1,326 +0,0 @@ -package seal - -import ( - "context" - "io" - "net/http" - "net/url" - "strconv" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-commp-utils/nonffi" - "github.com/filecoin-project/go-commp-utils/zerocomm" - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type TreesTask struct { - sp *SealPoller - db *harmonydb.DB - sc *ffi.SealCalls - - max 
int -} - -func NewTreesTask(sp *SealPoller, db *harmonydb.DB, sc *ffi.SealCalls, maxTrees int) *TreesTask { - return &TreesTask{ - sp: sp, - db: db, - sc: sc, - - max: maxTrees, - } -} - -func (t *TreesTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - } - - err = t.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof - FROM sectors_sdr_pipeline - WHERE task_id_tree_r = $1 AND task_id_tree_c = $1 AND task_id_tree_d = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - var pieces []struct { - PieceIndex int64 `db:"piece_index"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - - DataUrl *string `db:"data_url"` - DataHeaders *[]byte `db:"data_headers"` - DataRawSize *int64 `db:"data_raw_size"` - } - - err = t.db.Select(ctx, &pieces, ` - SELECT piece_index, piece_cid, piece_size, data_url, data_headers, data_raw_size - FROM sectors_sdr_initial_pieces - WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("getting pieces: %w", err) - } - - ssize, err := sectorParams.RegSealProof.SectorSize() - if err != nil { - return false, xerrors.Errorf("getting sector size: %w", err) - } - - var commd cid.Cid - var dataReader io.Reader - var unpaddedData bool - - var closers []io.Closer - defer func() { - for _, c := range closers { - if err := c.Close(); err != nil { - log.Errorw("error closing piece reader", "error", err) - } - } - }() - - if len(pieces) > 0 { - pieceInfos := 
make([]abi.PieceInfo, len(pieces)) - pieceReaders := make([]io.Reader, len(pieces)) - - for i, p := range pieces { - // make pieceInfo - c, err := cid.Parse(p.PieceCID) - if err != nil { - return false, xerrors.Errorf("parsing piece cid: %w", err) - } - - pieceInfos[i] = abi.PieceInfo{ - Size: abi.PaddedPieceSize(p.PieceSize), - PieceCID: c, - } - - // make pieceReader - if p.DataUrl != nil { - dataUrl := *p.DataUrl - - goUrl, err := url.Parse(dataUrl) - if err != nil { - return false, xerrors.Errorf("parsing data URL: %w", err) - } - - if goUrl.Scheme == "pieceref" { - // url is to a piece reference - - refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) - if err != nil { - return false, xerrors.Errorf("parsing piece reference number: %w", err) - } - - // get pieceID - var pieceID []struct { - PieceID storiface.PieceNumber `db:"piece_id"` - } - err = t.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum) - if err != nil { - return false, xerrors.Errorf("getting pieceID: %w", err) - } - - if len(pieceID) != 1 { - return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID)) - } - - pr, err := t.sc.PieceReader(ctx, pieceID[0].PieceID) - if err != nil { - return false, xerrors.Errorf("getting piece reader: %w", err) - } - - closers = append(closers, pr) - - pieceReaders[i], _ = padreader.New(pr, uint64(*p.DataRawSize)) - } else { - pieceReaders[i], _ = padreader.New(&UrlPieceReader{ - Url: dataUrl, - RawSize: *p.DataRawSize, - }, uint64(*p.DataRawSize)) - } - - } else { // padding piece (w/o fr32 padding, added in TreeD) - pieceReaders[i] = nullreader.NewNullReader(abi.PaddedPieceSize(p.PieceSize).Unpadded()) - } - } - - commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos) - if err != nil { - return false, xerrors.Errorf("computing CommD: %w", err) - } - - dataReader = io.MultiReader(pieceReaders...) 
- unpaddedData = true - } else { - commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) - dataReader = nullreader.NewNullReader(abi.UnpaddedPieceSize(ssize)) - unpaddedData = false // nullreader includes fr32 zero bits - } - - sref := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(sectorParams.SpID), - Number: abi.SectorNumber(sectorParams.SectorNumber), - }, - ProofType: sectorParams.RegSealProof, - } - - // D / R / C - sealed, unsealed, err := t.sc.TreeDRC(ctx, &taskID, sref, commd, abi.PaddedPieceSize(ssize), dataReader, unpaddedData) - if err != nil { - return false, xerrors.Errorf("computing tree d, r and c: %w", err) - } - - // todo synth porep - - // todo porep challenge check - - n, err := t.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_tree_r = true, after_tree_c = true, after_tree_d = true, tree_r_cid = $3, tree_d_cid = $4 - WHERE sp_id = $1 AND sector_number = $2`, - sectorParams.SpID, sectorParams.SectorNumber, sealed, unsealed) - if err != nil { - return false, xerrors.Errorf("store sdr-trees success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store sdr-trees success: updated %d rows", n) - } - - return true, nil -} - -func (t *TreesTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (t *TreesTask) TypeDetails() harmonytask.TaskTypeDetails { - ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size - if isDevnet { - ssize = abi.SectorSize(2 << 20) - } - - return harmonytask.TaskTypeDetails{ - Max: t.max, - Name: "SDRTrees", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 1, - Ram: 8000 << 20, // todo - Storage: t.sc.Storage(t.taskToSector, storiface.FTSealed, storiface.FTCache, ssize, storiface.PathSealing), - }, - MaxFailures: 3, - Follows: nil, - } -} - -func (t *TreesTask) Adder(taskFunc harmonytask.AddTaskFunc) { - 
t.sp.pollers[pollerTrees].Set(taskFunc) -} - -func (t *TreesTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { - var refs []ffi.SectorRef - - err := t.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_tree_r = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) - } - - if len(refs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) - } - - return refs[0], nil -} - -type UrlPieceReader struct { - Url string - RawSize int64 // the exact number of bytes read, if we read more or less that's an error - - readSoFar int64 - closed bool - active io.ReadCloser // auto-closed on EOF -} - -func (u *UrlPieceReader) Read(p []byte) (n int, err error) { - // Check if we have already read the required amount of data - if u.readSoFar >= u.RawSize { - return 0, io.EOF - } - - // If 'active' is nil, initiate the HTTP request - if u.active == nil { - resp, err := http.Get(u.Url) - if err != nil { - return 0, err - } - - // Set 'active' to the response body - u.active = resp.Body - } - - // Calculate the maximum number of bytes we can read without exceeding RawSize - toRead := u.RawSize - u.readSoFar - if int64(len(p)) > toRead { - p = p[:toRead] - } - - n, err = u.active.Read(p) - - // Update the number of bytes read so far - u.readSoFar += int64(n) - - // If the number of bytes read exceeds RawSize, return an error - if u.readSoFar > u.RawSize { - return n, xerrors.New("read beyond the specified RawSize") - } - - // If EOF is reached, close the reader - if err == io.EOF { - cerr := u.active.Close() - u.closed = true - if cerr != nil { - log.Errorf("error closing http piece reader: %s", cerr) - } - - // if we're below the RawSize, return an unexpected EOF error - if u.readSoFar < u.RawSize { - log.Errorw("unexpected EOF", "readSoFar", u.readSoFar, "rawSize", u.RawSize, "url", u.Url) - return n, 
io.ErrUnexpectedEOF - } - } - - return n, err -} - -func (u *UrlPieceReader) Close() error { - if !u.closed { - u.closed = true - return u.active.Close() - } - - return nil -} - -var _ harmonytask.TaskInterface = &TreesTask{} diff --git a/curiosrc/seal/task_trees_test.go b/curiosrc/seal/task_trees_test.go deleted file mode 100644 index b65ddd4e858..00000000000 --- a/curiosrc/seal/task_trees_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package seal - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/require" -) - -// TestUrlPieceReader_Read tests various scenarios of reading data from UrlPieceReader -func TestUrlPieceReader_Read(t *testing.T) { - // Create a test server - testData := "This is a test string." - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _, err := io.WriteString(w, testData) - require.NoError(t, err) - })) - defer ts.Close() - - tests := []struct { - name string - rawSize int64 - expected string - expectError bool - expectEOF bool - }{ - {"ReadExact", int64(len(testData)), testData, false, true}, - {"ReadLess", 10, testData[:10], false, false}, - {"ReadMore", int64(len(testData)) + 10, "", true, false}, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - reader := UrlPieceReader{ - Url: ts.URL, - RawSize: tt.rawSize, - } - buffer, err := io.ReadAll(&reader) - if err != nil { - if (err != io.EOF && !tt.expectError) || (err == io.EOF && !tt.expectEOF) { - t.Errorf("Read() error = %v, expectError %v, expectEOF %v", err, tt.expectError, tt.expectEOF) - } - } else { - if got := string(buffer); got != tt.expected { - t.Errorf("Read() got = %v, expected %v", got, tt.expected) - } - } - }) - } -} - -// TestUrlPieceReader_Read_Error tests the error handling of UrlPieceReader -func TestUrlPieceReader_Read_Error(t *testing.T) { - // Simulate a server that returns an error - ts := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { - http.Error(w, "error", http.StatusInternalServerError) - })) - defer ts.Close() - - reader := UrlPieceReader{ - Url: ts.URL, - RawSize: 100, - } - buffer := make([]byte, 200) - - _, err := reader.Read(buffer) - if err == nil { - t.Errorf("Expected an error, but got nil") - } -} diff --git a/curiosrc/web/api/apihelper/apihelper.go b/curiosrc/web/api/apihelper/apihelper.go deleted file mode 100644 index 07c7898e86d..00000000000 --- a/curiosrc/web/api/apihelper/apihelper.go +++ /dev/null @@ -1,19 +0,0 @@ -package apihelper - -import ( - "net/http" - "runtime/debug" - - logging "github.com/ipfs/go-log/v2" -) - -var log = logging.Logger("lp/web/apihelper") - -func OrHTTPFail(w http.ResponseWriter, err error) { - if err != nil { - w.WriteHeader(500) - _, _ = w.Write([]byte(err.Error())) - log.Errorw("http fail", "err", err, "stack", string(debug.Stack())) - panic(err) - } -} diff --git a/curiosrc/web/api/config/config.go b/curiosrc/web/api/config/config.go deleted file mode 100644 index 6f9598f7fad..00000000000 --- a/curiosrc/web/api/config/config.go +++ /dev/null @@ -1,179 +0,0 @@ -package config - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "reflect" - "time" - - "github.com/BurntSushi/toml" - "github.com/gorilla/mux" - "github.com/invopop/jsonschema" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api/apihelper" - "github.com/filecoin-project/lotus/node/config" -) - -type cfg struct { - *deps.Deps -} - -func Routes(r *mux.Router, deps *deps.Deps) { - c := &cfg{deps} - // At menu.html: - r.Methods("GET").Path("/layers").HandlerFunc(c.getLayers) - r.Methods("GET").Path("/topo").HandlerFunc(c.topo) - - // At edit.html: - r.Methods("GET").Path("/schema").HandlerFunc(getSch) - r.Methods("GET").Path("/layers/{layer}").HandlerFunc(c.getLayer) - r.Methods("POST").Path("/layers/{layer}").HandlerFunc(c.setLayer) - 
r.Methods("GET").Path("/default").HandlerFunc(c.def) -} -func getSch(w http.ResponseWriter, r *http.Request) { - ref := jsonschema.Reflector{ - Mapper: func(i reflect.Type) *jsonschema.Schema { - if i == reflect.TypeOf(config.Duration(time.Second)) { - return &jsonschema.Schema{ - Type: "string", - Format: "duration", - } - } - return nil - }, - } - sch := ref.Reflect(config.CurioConfig{}) - // add comments - for k, doc := range config.Doc { - item, ok := sch.Definitions[k] - if !ok { - continue - } - for _, line := range doc { - item, ok := item.Properties.Get(line.Name) - if !ok { - continue - } - if line.Comment != "" { - extra := make(map[string]any) - type options struct { - InfoText string `json:"infoText"` - } - opt := options{ - InfoText: line.Comment, - } - extra["options"] = opt - item.Extras = extra - } - } - } - - var allOpt func(s *jsonschema.Schema) - allOpt = func(s *jsonschema.Schema) { - s.Required = []string{} - for _, v := range s.Definitions { - v.Required = []string{} - - allOpt(v) - } - } - allOpt(sch) - - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(sch)) -} - -func (c *cfg) getLayers(w http.ResponseWriter, r *http.Request) { - var layers []string - apihelper.OrHTTPFail(w, c.DB.Select(context.Background(), &layers, `SELECT title FROM harmony_config ORDER BY title`)) - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(layers)) -} - -func (c *cfg) getLayer(w http.ResponseWriter, r *http.Request) { - var layer string - apihelper.OrHTTPFail(w, c.DB.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title = $1`, mux.Vars(r)["layer"]).Scan(&layer)) - - // Read the TOML into a struct - configStruct := map[string]any{} // NOT CurioConfig b/c we want to preserve unsets - _, err := toml.Decode(layer, &configStruct) - apihelper.OrHTTPFail(w, err) - - // Encode the struct as JSON - jsonData, err := json.Marshal(configStruct) - apihelper.OrHTTPFail(w, err) - - // Write the JSON response - w.Header().Set("Content-Type", 
"application/json") - _, err = w.Write(jsonData) - apihelper.OrHTTPFail(w, err) -} - -func (c *cfg) setLayer(w http.ResponseWriter, r *http.Request) { - layer := mux.Vars(r)["layer"] - var configStruct map[string]any - dec := json.NewDecoder(r.Body) - dec.UseNumber() // JSON lib by default treats number is float64() - apihelper.OrHTTPFail(w, dec.Decode(&configStruct)) - - //Encode the struct as TOML - var tomlData bytes.Buffer - err := toml.NewEncoder(&tomlData).Encode(configStruct) - apihelper.OrHTTPFail(w, err) - - configStr := tomlData.String() - - // Generate a full commented string if this is base layer - if layer == "base" { - // Parse the into CurioConfig TOML - curioCfg := config.DefaultCurioConfig() - _, err = deps.LoadConfigWithUpgrades(tomlData.String(), curioCfg) - apihelper.OrHTTPFail(w, err) - cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - apihelper.OrHTTPFail(w, err) - configStr = string(cb) - } - - //Write the TOML to the database - _, err = c.DB.Exec(context.Background(), `INSERT INTO harmony_config (title, config) VALUES ($1, $2) ON CONFLICT (title) DO UPDATE SET config = $2`, layer, configStr) - apihelper.OrHTTPFail(w, err) -} - -func (c *cfg) topo(w http.ResponseWriter, r *http.Request) { - var topology []struct { - Server string `db:"server"` - CPU int `db:"cpu"` - GPU int `db:"gpu"` - RAM int `db:"ram"` - LayersCSV string `db:"layers"` - TasksCSV string `db:"tasks"` - } - apihelper.OrHTTPFail(w, c.DB.Select(context.Background(), &topology, ` - SELECT - m.host_and_port as server, - cpu, gpu, ram, layers, tasks - FROM harmony_machines m JOIN harmony_machine_details d ON m.id=d.machine_id - ORDER BY server`)) - w.Header().Set("Content-Type", "application/json") - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(topology)) -} - -func (c *cfg) def(w http.ResponseWriter, r *http.Request) { - cb, err := config.ConfigUpdate(config.DefaultCurioConfig(), 
nil, config.Commented(false), config.DefaultKeepUncommented(), config.NoEnv()) - apihelper.OrHTTPFail(w, err) - - // Read the TOML into a struct - configStruct := map[string]any{} // NOT CurioConfig b/c we want to preserve unsets - _, err = toml.Decode(string(cb), &configStruct) - apihelper.OrHTTPFail(w, err) - - // Encode the struct as JSON - jsonData, err := json.Marshal(configStruct) - apihelper.OrHTTPFail(w, err) - - // Write the JSON response - w.Header().Set("Content-Type", "application/json") - _, err = w.Write(jsonData) - apihelper.OrHTTPFail(w, err) -} diff --git a/curiosrc/web/api/debug/debug.go b/curiosrc/web/api/debug/debug.go deleted file mode 100644 index c0e89ab8e29..00000000000 --- a/curiosrc/web/api/debug/debug.go +++ /dev/null @@ -1,229 +0,0 @@ -// Package debug provides the API for various debug endpoints in curio. -package debug - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "sort" - "sync" - "time" - - "github.com/BurntSushi/toml" - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/build" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" -) - -var log = logging.Logger("curio/web/debug") - -type debug struct { - *deps.Deps -} - -func Routes(r *mux.Router, deps *deps.Deps) { - d := debug{deps} - r.HandleFunc("/chain-state-sse", d.chainStateSSE) -} - -type rpcInfo struct { - Address string - CLayers []string - Reachable bool - SyncState string - Version string -} - -func (d *debug) chainStateSSE(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type") - w.Header().Set("Content-Type", "text/event-stream") - w.Header().Set("Cache-Control", "no-cache") - w.Header().Set("Connection", "keep-alive") - - ctx := r.Context() - - for { - - type 
minimalApiInfo struct { - Apis struct { - ChainApiInfo []string - } - } - - rpcInfos := map[string]minimalApiInfo{} // config name -> api info - confNameToAddr := map[string]string{} // config name -> api address - - err := forEachConfig[minimalApiInfo](d, func(name string, info minimalApiInfo) error { - if len(info.Apis.ChainApiInfo) == 0 { - return nil - } - - rpcInfos[name] = info - - for _, addr := range info.Apis.ChainApiInfo { - ai := cliutil.ParseApiInfo(addr) - confNameToAddr[name] = ai.Addr - } - - return nil - }) - if err != nil { - log.Errorw("getting api info", "error", err) - return - } - - dedup := map[string]bool{} // for dedup by address - - infos := map[string]rpcInfo{} // api address -> rpc info - var infosLk sync.Mutex - - var wg sync.WaitGroup - for _, info := range rpcInfos { - ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0]) - if dedup[ai.Addr] { - continue - } - dedup[ai.Addr] = true - wg.Add(1) - go func() { - defer wg.Done() - var clayers []string - for layer, a := range confNameToAddr { - if a == ai.Addr { - clayers = append(clayers, layer) - } - } - - myinfo := rpcInfo{ - Address: ai.Addr, - Reachable: false, - CLayers: clayers, - } - defer func() { - infosLk.Lock() - defer infosLk.Unlock() - infos[ai.Addr] = myinfo - }() - da, err := ai.DialArgs("v1") - if err != nil { - log.Warnw("DialArgs", "error", err) - return - } - - ah := ai.AuthHeader() - - v1api, closer, err := client.NewFullNodeRPCV1(ctx, da, ah) - if err != nil { - log.Warnf("Not able to establish connection to node with addr: %s", ai.Addr) - return - } - defer closer() - - ver, err := v1api.Version(ctx) - if err != nil { - log.Warnw("Version", "error", err) - return - } - - head, err := v1api.ChainHead(ctx) - if err != nil { - log.Warnw("ChainHead", "error", err) - return - } - - var syncState string - switch { - case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs - syncState = "ok" - case 
time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs - syncState = fmt.Sprintf("slow (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) - default: - syncState = fmt.Sprintf("behind (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) - } - - myinfo = rpcInfo{ - Address: ai.Addr, - CLayers: clayers, - Reachable: true, - Version: ver.Version, - SyncState: syncState, - } - }() - } - wg.Wait() - - var infoList []rpcInfo - for _, i := range infos { - infoList = append(infoList, i) - } - sort.Slice(infoList, func(i, j int) bool { - return infoList[i].Address < infoList[j].Address - }) - - fmt.Fprintf(w, "data: ") - err = json.NewEncoder(w).Encode(&infoList) - if err != nil { - log.Warnw("json encode", "error", err) - return - } - fmt.Fprintf(w, "\n\n") - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - - time.Sleep(time.Duration(build.BlockDelaySecs) * time.Second) - - select { // stop running if there is reader. 
- case <-ctx.Done(): - return - default: - } - } -} - -func forEachConfig[T any](a *debug, cb func(name string, v T) error) error { - confs, err := a.loadConfigs(context.Background()) - if err != nil { - return err - } - - for name, tomlStr := range confs { // todo for-each-config - var info T - if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil { - return xerrors.Errorf("unmarshaling %s config: %w", name, err) - } - - if err := cb(name, info); err != nil { - return xerrors.Errorf("cb: %w", err) - } - } - - return nil -} - -func (d *debug) loadConfigs(ctx context.Context) (map[string]string, error) { - //err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) - - rows, err := d.DB.Query(ctx, `SELECT title, config FROM harmony_config`) - if err != nil { - return nil, xerrors.Errorf("getting db configs: %w", err) - } - - configs := make(map[string]string) - for rows.Next() { - var title, config string - if err := rows.Scan(&title, &config); err != nil { - return nil, xerrors.Errorf("scanning db configs: %w", err) - } - configs[title] = config - } - - return configs, nil -} diff --git a/curiosrc/web/api/routes.go b/curiosrc/web/api/routes.go deleted file mode 100644 index cf56257ee92..00000000000 --- a/curiosrc/web/api/routes.go +++ /dev/null @@ -1,17 +0,0 @@ -// Package api provides the HTTP API for the lotus curio web gui. 
-package api - -import ( - "github.com/gorilla/mux" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api/config" - "github.com/filecoin-project/lotus/curiosrc/web/api/debug" - "github.com/filecoin-project/lotus/curiosrc/web/api/sector" -) - -func Routes(r *mux.Router, deps *deps.Deps) { - debug.Routes(r.PathPrefix("/debug").Subrouter(), deps) - config.Routes(r.PathPrefix("/config").Subrouter(), deps) - sector.Routes(r.PathPrefix("/sector").Subrouter(), deps) -} diff --git a/curiosrc/web/api/sector/sector.go b/curiosrc/web/api/sector/sector.go deleted file mode 100644 index e3b4b3c158d..00000000000 --- a/curiosrc/web/api/sector/sector.go +++ /dev/null @@ -1,378 +0,0 @@ -package sector - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "sync" - "time" - - "github.com/docker/go-units" - "github.com/gorilla/mux" - "github.com/samber/lo" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v9/market" - - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/spcli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api/apihelper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -const verifiedPowerGainMul = 9 - -type cfg struct { - *deps.Deps -} - -func Routes(r *mux.Router, deps *deps.Deps) { - c := &cfg{deps} - // At menu.html: - r.Methods("GET").Path("/all").HandlerFunc(c.getSectors) - r.Methods("POST").Path("/terminate").HandlerFunc(c.terminateSectors) -} - -func (c *cfg) terminateSectors(w http.ResponseWriter, r *http.Request) { - var in []struct { - MinerID int - Sector int - } - apihelper.OrHTTPFail(w, json.NewDecoder(r.Body).Decode(&in)) 
- toDel := map[int][]int{} - for _, s := range in { - toDel[s.MinerID] = append(toDel[s.MinerID], s.Sector) - } - - for minerInt, sectors := range toDel { - maddr, err := address.NewIDAddress(uint64(minerInt)) - apihelper.OrHTTPFail(w, err) - mi, err := c.Full.StateMinerInfo(r.Context(), maddr, types.EmptyTSK) - apihelper.OrHTTPFail(w, err) - _, err = spcli.TerminateSectors(r.Context(), c.Full, maddr, sectors, mi.Worker) - apihelper.OrHTTPFail(w, err) - for _, sectorNumber := range sectors { - id := abi.SectorID{Miner: abi.ActorID(minerInt), Number: abi.SectorNumber(sectorNumber)} - apihelper.OrHTTPFail(w, c.Stor.Remove(r.Context(), id, storiface.FTAll, true, nil)) - } - } -} - -func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) { - // TODO get sector info from chain and from database, then fold them together - // and return the result. - type sector struct { - MinerID int64 `db:"miner_id"` - SectorNum int64 `db:"sector_num"` - SectorFiletype int `db:"sector_filetype" json:"-"` // Useless? - HasSealed bool - HasUnsealed bool - HasSnap bool - ExpiresAt abi.ChainEpoch // map to Duration - IsOnChain bool - IsFilPlus bool - SealInfo string - Proving bool - Flag bool - DealWeight string - Deals string - //StorageID string `db:"storage_id"` // map to serverName - // Activation abi.ChainEpoch // map to time.Time. 
advanced view only - // DealIDs []abi.DealID // advanced view only - //ExpectedDayReward abi.TokenAmount - //SealProof abi.RegisteredSealProof - } - - type piece struct { - Size int64 `db:"piece_size"` - DealID uint64 `db:"f05_deal_id"` - Proposal json.RawMessage `db:"f05_deal_proposal"` - Manifest json.RawMessage `db:"direct_piece_activation_manifest"` - Miner int64 `db:"sp_id"` - Sector int64 `db:"sector_number"` - } - var sectors []sector - var pieces []piece - apihelper.OrHTTPFail(w, c.DB.Select(r.Context(), &sectors, `SELECT - miner_id, sector_num, SUM(sector_filetype) as sector_filetype - FROM sector_location WHERE sector_filetype != 32 - GROUP BY miner_id, sector_num - ORDER BY miner_id, sector_num`)) - minerToAddr := map[int64]address.Address{} - head, err := c.Full.ChainHead(r.Context()) - apihelper.OrHTTPFail(w, err) - - type sectorID struct { - mID int64 - sNum uint64 - } - sectorIdx := map[sectorID]int{} - for i, s := range sectors { - sectors[i].HasSealed = s.SectorFiletype&int(storiface.FTSealed) != 0 || s.SectorFiletype&int(storiface.FTUpdate) != 0 - sectors[i].HasUnsealed = s.SectorFiletype&int(storiface.FTUnsealed) != 0 - sectors[i].HasSnap = s.SectorFiletype&int(storiface.FTUpdate) != 0 - sectorIdx[sectorID{s.MinerID, uint64(s.SectorNum)}] = i - if _, ok := minerToAddr[s.MinerID]; !ok { - minerToAddr[s.MinerID], err = address.NewIDAddress(uint64(s.MinerID)) - apihelper.OrHTTPFail(w, err) - } - } - - // Get all pieces - apihelper.OrHTTPFail(w, c.DB.Select(r.Context(), &pieces, `SELECT - sp_id, sector_number, piece_size, f05_deal_id, f05_deal_proposal, direct_piece_activation_manifest - FROM sectors_sdr_initial_pieces - ORDER BY sp_id, sector_number`)) - pieceIndex := map[sectorID][]int{} - for i, piece := range pieces { - piece := piece - cur := pieceIndex[sectorID{mID: piece.Miner, sNum: uint64(piece.Sector)}] - pieceIndex[sectorID{mID: piece.Miner, sNum: uint64(piece.Sector)}] = append(cur, i) - } - - for minerID, maddr := range minerToAddr { - 
onChainInfo, err := c.getCachedSectorInfo(w, r, maddr, head.Key()) - apihelper.OrHTTPFail(w, err) - for _, chainy := range onChainInfo { - st := chainy.onChain - if i, ok := sectorIdx[sectorID{minerID, uint64(st.SectorNumber)}]; ok { - sectors[i].IsOnChain = true - sectors[i].ExpiresAt = st.Expiration - sectors[i].IsFilPlus = st.VerifiedDealWeight.GreaterThan(st.DealWeight) - if ss, err := st.SealProof.SectorSize(); err == nil { - sectors[i].SealInfo = ss.ShortString() - } - sectors[i].Proving = chainy.active - if st.Expiration < head.Height() { - sectors[i].Flag = true // Flag expired sectors - } - - dw, vp := .0, .0 - f05, ddo := 0, 0 - var pi []piece - if j, ok := pieceIndex[sectorID{sectors[i].MinerID, uint64(sectors[i].SectorNum)}]; ok { - for _, k := range j { - pi = append(pi, pieces[k]) - } - } - estimate := st.Expiration-st.Activation <= 0 || sectors[i].HasSnap - if estimate { - for _, p := range pi { - if p.Proposal != nil { - var prop *market.DealProposal - apihelper.OrHTTPFail(w, json.Unmarshal(p.Proposal, &prop)) - dw += float64(prop.PieceSize) - if prop.VerifiedDeal { - vp += float64(prop.PieceSize) * verifiedPowerGainMul - } - f05++ - } - if p.Manifest != nil { - var pam *miner.PieceActivationManifest - apihelper.OrHTTPFail(w, json.Unmarshal(p.Manifest, &pam)) - dw += float64(pam.Size) - if pam.VerifiedAllocationKey != nil { - vp += float64(pam.Size) * verifiedPowerGainMul - } - ddo++ - } - } - } else { - rdw := big.Add(st.DealWeight, st.VerifiedDealWeight) - dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) - vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) - for _, deal := range st.DealIDs { - - if deal > 0 { - f05++ - } - } - // DDO info is not on chain - for _, piece := range pieces { - if piece.Manifest != nil { - //var pam *miner.PieceActivationManifest - //apihelper.OrHTTPFail(w, json.Unmarshal(piece.Manifest, pam)) 
- //dw += float64(pam.Size) - //if pam.VerifiedAllocationKey != nil { - // vp += float64(pam.Size) * verifiedPowerGainMul - //} - ddo++ - } - } - } - sectors[i].DealWeight = "CC" - if dw > 0 { - sectors[i].DealWeight = fmt.Sprintf("%s", units.BytesSize(dw)) - } - if vp > 0 { - sectors[i].DealWeight = fmt.Sprintf("%s", units.BytesSize(vp)) - } - sectors[i].Deals = fmt.Sprintf("Market: %d, DDO: %d", f05, ddo) - } else { - // sector is on chain but not in db - s := sector{ - MinerID: minerID, - SectorNum: int64(chainy.onChain.SectorNumber), - IsOnChain: true, - ExpiresAt: chainy.onChain.Expiration, - IsFilPlus: chainy.onChain.VerifiedDealWeight.GreaterThan(chainy.onChain.DealWeight), - Proving: chainy.active, - Flag: true, // All such sectors should be flagged to be terminated - } - if ss, err := chainy.onChain.SealProof.SectorSize(); err == nil { - s.SealInfo = ss.ShortString() - } - sectors = append(sectors, s) - } - /* - info, err := c.Full.StateSectorGetInfo(r.Context(), minerToAddr[s], abi.SectorNumber(uint64(sectors[i].SectorNum)), headKey) - if err != nil { - sectors[i].IsValid = false - continue - }*/ - } - } - - // Add deal details to sectors which are not on chain - for i := range sectors { - if !sectors[i].IsOnChain { - var pi []piece - dw, vp := .0, .0 - f05, ddo := 0, 0 - - // Find if there are any deals for this sector - if j, ok := pieceIndex[sectorID{sectors[i].MinerID, uint64(sectors[i].SectorNum)}]; ok { - for _, k := range j { - pi = append(pi, pieces[k]) - } - } - - if len(pi) > 0 { - for _, piece := range pi { - if piece.Proposal != nil { - var prop *market.DealProposal - apihelper.OrHTTPFail(w, json.Unmarshal(piece.Proposal, &prop)) - dw += float64(prop.PieceSize) - if prop.VerifiedDeal { - vp += float64(prop.PieceSize) * verifiedPowerGainMul - } - f05++ - } - if piece.Manifest != nil { - var pam *miner.PieceActivationManifest - apihelper.OrHTTPFail(w, json.Unmarshal(piece.Manifest, &pam)) - dw += float64(pam.Size) - if pam.VerifiedAllocationKey 
!= nil { - vp += float64(pam.Size) * verifiedPowerGainMul - } - ddo++ - } - } - } - if dw > 0 { - sectors[i].DealWeight = fmt.Sprintf("%s", units.BytesSize(dw)) - } else if vp > 0 { - sectors[i].DealWeight = fmt.Sprintf("%s", units.BytesSize(vp)) - } else { - sectors[i].DealWeight = "CC" - } - sectors[i].Deals = fmt.Sprintf("Market: %d, DDO: %d", f05, ddo) - } - } - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(map[string]any{"data": sectors})) -} - -type sectorInfo struct { - onChain *miner.SectorOnChainInfo - active bool -} - -type sectorCacheEntry struct { - sectors []sectorInfo - loading chan struct{} - time.Time -} - -const cacheTimeout = 30 * time.Minute - -var mx sync.Mutex -var sectorInfoCache = map[address.Address]sectorCacheEntry{} - -// getCachedSectorInfo returns the sector info for the given miner address, -// either from the cache or by querying the chain. -// Cache can be invalidated by setting the "sector_refresh" cookie to "true". -// This is thread-safe. -// Parallel requests share the chain's first response. 
-func (c *cfg) getCachedSectorInfo(w http.ResponseWriter, r *http.Request, maddr address.Address, headKey types.TipSetKey) ([]sectorInfo, error) { - mx.Lock() - v, ok := sectorInfoCache[maddr] - mx.Unlock() - - if ok && v.loading != nil { - <-v.loading - mx.Lock() - v, ok = sectorInfoCache[maddr] - mx.Unlock() - } - - shouldRefreshCookie, found := lo.Find(r.Cookies(), func(item *http.Cookie) bool { return item.Name == "sector_refresh" }) - shouldRefresh := found && shouldRefreshCookie.Value == "true" - w.Header().Set("Set-Cookie", "sector_refresh=; Max-Age=0; Path=/") - - if !ok || time.Since(v.Time) > cacheTimeout || shouldRefresh { - v = sectorCacheEntry{nil, make(chan struct{}), time.Now()} - mx.Lock() - sectorInfoCache[maddr] = v - mx.Unlock() - - // Intentionally not using the context from the request, as this is a cache - onChainInfo, err := c.Full.StateMinerSectors(context.Background(), maddr, nil, headKey) - if err != nil { - mx.Lock() - delete(sectorInfoCache, maddr) - close(v.loading) - mx.Unlock() - return nil, err - } - active, err := c.Full.StateMinerActiveSectors(context.Background(), maddr, headKey) - if err != nil { - mx.Lock() - delete(sectorInfoCache, maddr) - close(v.loading) - mx.Unlock() - return nil, err - } - activebf := bitfield.New() - for i := range active { - activebf.Set(uint64(active[i].SectorNumber)) - } - infos := make([]sectorInfo, len(onChainInfo)) - for i, info := range onChainInfo { - info := info - set, err := activebf.IsSet(uint64(info.SectorNumber)) - if err != nil { - mx.Lock() - delete(sectorInfoCache, maddr) - close(v.loading) - mx.Unlock() - return nil, err - } - infos[i] = sectorInfo{ - onChain: info, - active: set, - } - } - mx.Lock() - sectorInfoCache[maddr] = sectorCacheEntry{infos, nil, time.Now()} - close(v.loading) - mx.Unlock() - return infos, nil - } - return v.sectors, nil -} diff --git a/curiosrc/web/hapi/robust_rpc.go b/curiosrc/web/hapi/robust_rpc.go deleted file mode 100644 index c10b43a03f3..00000000000 --- 
a/curiosrc/web/hapi/robust_rpc.go +++ /dev/null @@ -1,102 +0,0 @@ -package hapi - -import ( - "context" - "time" - - lru "github.com/hashicorp/golang-lru/v2" - blocks "github.com/ipfs/go-block-format" - - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/store" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/lib/must" -) - -var ChainBlockCache = must.One(lru.New[blockstore.MhString, blocks.Block](4096)) - -func (a *app) watchRpc() { - ticker := time.NewTicker(watchInterval) - for { - err := a.updateRpc(context.TODO()) - if err != nil { - log.Errorw("updating rpc info", "error", err) - } - select { - case <-ticker.C: - } - } -} - -type minimalApiInfo struct { - Apis struct { - ChainApiInfo []string - } -} - -func (a *app) updateRpc(ctx context.Context) error { - rpcInfos := map[string]minimalApiInfo{} // config name -> api info - confNameToAddr := map[string]string{} // config name -> api address - - err := forEachConfig[minimalApiInfo](a, func(name string, info minimalApiInfo) error { - if len(info.Apis.ChainApiInfo) == 0 { - return nil - } - - rpcInfos[name] = info - - for _, addr := range info.Apis.ChainApiInfo { - ai := cliutil.ParseApiInfo(addr) - confNameToAddr[name] = ai.Addr - } - - return nil - }) - if err != nil { - return err - } - - apiInfos := map[string][]byte{} // api address -> token - - // for dedup by address - for _, info := range rpcInfos { - ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0]) - apiInfos[ai.Addr] = ai.Token - } - - a.rpcInfoLk.Lock() - - // todo improve this shared rpc logic - if a.workingApi == nil { - for addr, token := range apiInfos { - ai := cliutil.APIInfo{ - Addr: addr, - Token: token, - } - - da, err := ai.DialArgs("v1") - if err != nil { - continue - } - - ah := ai.AuthHeader() - - v1api, closer, err := client.NewFullNodeRPCV1(ctx, da, ah) - if err != nil { - continue - } - go func() { - 
<-ctx.Done() - closer() - }() - - a.workingApi = v1api - a.stor = store.ActorStore(ctx, blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), ChainBlockCache)) - } - } - - a.rpcInfoLk.Unlock() - - return nil -} diff --git a/curiosrc/web/hapi/routes.go b/curiosrc/web/hapi/routes.go deleted file mode 100644 index 61724ec0ae5..00000000000 --- a/curiosrc/web/hapi/routes.go +++ /dev/null @@ -1,58 +0,0 @@ -package hapi - -import ( - "embed" - "text/template" - - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cmd/curio/deps" -) - -//go:embed web/* -var templateFS embed.FS - -func Routes(r *mux.Router, deps *deps.Deps) error { - t, err := makeTemplate().ParseFS(templateFS, "web/*") - if err != nil { - return xerrors.Errorf("parse templates: %w", err) - } - - a := &app{ - db: deps.DB, - t: t, - } - - go a.watchRpc() - go a.watchActor() - - // index page (simple info) - r.HandleFunc("/simpleinfo/actorsummary", a.actorSummary) - r.HandleFunc("/simpleinfo/machines", a.indexMachines) - r.HandleFunc("/simpleinfo/tasks", a.indexTasks) - r.HandleFunc("/simpleinfo/taskhistory", a.indexTasksHistory) - r.HandleFunc("/simpleinfo/pipeline-porep", a.indexPipelinePorep) - - // pipeline-porep page - r.HandleFunc("/pipeline-porep/sectors", a.pipelinePorepSectors) - - // node info page - r.HandleFunc("/node/{id}", a.nodeInfo) - - // sector info page - r.HandleFunc("/sector/{sp}/{id}", a.sectorInfo) - return nil -} - -func makeTemplate() *template.Template { - return template.New("").Funcs(template.FuncMap{ - "toHumanBytes": func(b int64) string { - return types.SizeStr(types.NewInt(uint64(b))) - }, - }) -} - -var log = logging.Logger("curio/web") diff --git a/curiosrc/web/hapi/simpleinfo.go b/curiosrc/web/hapi/simpleinfo.go deleted file mode 100644 index 287e11233fd..00000000000 --- a/curiosrc/web/hapi/simpleinfo.go +++ /dev/null @@ -1,857 
+0,0 @@ -package hapi - -import ( - "bytes" - "context" - "fmt" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - "text/template" - "time" - - "github.com/gorilla/mux" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/must" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type app struct { - db *harmonydb.DB - t *template.Template - - rpcInfoLk sync.Mutex - workingApi v1api.FullNode - stor adt.Store - - actorInfoLk sync.Mutex - actorInfos []actorInfo -} - -type actorInfo struct { - Address string - CLayers []string - - QualityAdjustedPower string - RawBytePower string - - ActorBalance, ActorAvailable, WorkerBalance string - - Win1, Win7, Win30 int64 - - Deadlines []actorDeadline -} - -type actorDeadline struct { - Empty bool - Current bool - Proven bool - PartFaulty bool - Faulty bool -} - -func (a *app) actorSummary(w http.ResponseWriter, r *http.Request) { - a.actorInfoLk.Lock() - defer a.actorInfoLk.Unlock() - - a.executeTemplate(w, "actor_summary", a.actorInfos) -} - -func (a *app) indexMachines(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterMachineSummary(r.Context()) - if err != nil { - log.Errorf("cluster machine summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_machines", s) -} - -func (a *app) indexTasks(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterTaskSummary(r.Context()) - if err != nil { - log.Errorf("cluster task summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_tasks", s) -} - -func (a *app) indexTasksHistory(w 
http.ResponseWriter, r *http.Request) { - s, err := a.clusterTaskHistorySummary(r.Context()) - if err != nil { - log.Errorf("cluster task history summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_task_history", s) -} - -func (a *app) indexPipelinePorep(w http.ResponseWriter, r *http.Request) { - s, err := a.porepPipelineSummary(r.Context()) - if err != nil { - log.Errorf("porep pipeline summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "pipeline_porep", s) -} - -func (a *app) nodeInfo(writer http.ResponseWriter, request *http.Request) { - params := mux.Vars(request) - - id, ok := params["id"] - if !ok { - http.Error(writer, "missing id", http.StatusBadRequest) - return - } - - intid, err := strconv.ParseInt(id, 10, 64) - if err != nil { - http.Error(writer, "invalid id", http.StatusBadRequest) - return - } - - mi, err := a.clusterNodeInfo(request.Context(), intid) - if err != nil { - log.Errorf("machine info: %v", err) - http.Error(writer, "internal server error", http.StatusInternalServerError) - return - } - - a.executePageTemplate(writer, "node_info", "Node Info", mi) -} - -func (a *app) sectorInfo(w http.ResponseWriter, r *http.Request) { - params := mux.Vars(r) - - id, ok := params["id"] - if !ok { - http.Error(w, "missing id", http.StatusBadRequest) - return - } - - intid, err := strconv.ParseInt(id, 10, 64) - if err != nil { - http.Error(w, "invalid id", http.StatusBadRequest) - return - } - - sp, ok := params["sp"] - if !ok { - http.Error(w, "missing sp", http.StatusBadRequest) - return - } - - maddr, err := address.NewFromString(sp) - if err != nil { - http.Error(w, "invalid sp", http.StatusBadRequest) - return - } - - spid, err := address.IDFromAddress(maddr) - if err != nil { - http.Error(w, "invalid sp", http.StatusBadRequest) - return - } - - ctx := r.Context() - var tasks 
[]PipelineTask - - err = a.db.Select(ctx, &tasks, `SELECT - sp_id, sector_number, - create_time, - task_id_sdr, after_sdr, - task_id_tree_d, after_tree_d, - task_id_tree_c, after_tree_c, - task_id_tree_r, after_tree_r, - task_id_precommit_msg, after_precommit_msg, - after_precommit_msg_success, seed_epoch, - task_id_porep, porep_proof, after_porep, - task_id_finalize, after_finalize, - task_id_move_storage, after_move_storage, - task_id_commit_msg, after_commit_msg, - after_commit_msg_success, - failed, failed_reason - FROM sectors_sdr_pipeline WHERE sp_id = $1 AND sector_number = $2`, spid, intid) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch pipeline task info: %w", err).Error(), http.StatusInternalServerError) - return - } - - if len(tasks) == 0 { - http.Error(w, "sector not found", http.StatusInternalServerError) - return - } - - head, err := a.workingApi.ChainHead(ctx) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch chain head: %w", err).Error(), http.StatusInternalServerError) - return - } - epoch := head.Height() - - mbf, err := a.getMinerBitfields(ctx, maddr, a.stor) - if err != nil { - http.Error(w, xerrors.Errorf("failed to load miner bitfields: %w", err).Error(), http.StatusInternalServerError) - return - } - - task := tasks[0] - - afterSeed := task.SeedEpoch != nil && *task.SeedEpoch <= int64(epoch) - - var sectorLocations []struct { - CanSeal, CanStore bool - FileType storiface.SectorFileType `db:"sector_filetype"` - StorageID string `db:"storage_id"` - Urls string `db:"urls"` - } - - err = a.db.Select(ctx, &sectorLocations, `SELECT p.can_seal, p.can_store, l.sector_filetype, l.storage_id, p.urls FROM sector_location l - JOIN storage_path p ON l.storage_id = p.storage_id - WHERE l.sector_num = $1 and l.miner_id = $2 ORDER BY p.can_seal, p.can_store, l.storage_id`, intid, spid) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch sector locations: %w", err).Error(), http.StatusInternalServerError) - return 
- } - - type fileLocations struct { - StorageID string - Urls []string - } - - type locationTable struct { - PathType *string - PathTypeRowSpan int - - FileType *string - FileTypeRowSpan int - - Locations []fileLocations - } - locs := []locationTable{} - - for i, loc := range sectorLocations { - loc := loc - - urlList := strings.Split(loc.Urls, paths.URLSeparator) - - fLoc := fileLocations{ - StorageID: loc.StorageID, - Urls: urlList, - } - - var pathTypeStr *string - var fileTypeStr *string - pathTypeRowSpan := 1 - fileTypeRowSpan := 1 - - pathType := "None" - if loc.CanSeal && loc.CanStore { - pathType = "Seal/Store" - } else if loc.CanSeal { - pathType = "Seal" - } else if loc.CanStore { - pathType = "Store" - } - pathTypeStr = &pathType - - fileType := loc.FileType.String() - fileTypeStr = &fileType - - if i > 0 { - prevNonNilPathTypeLoc := i - 1 - for prevNonNilPathTypeLoc > 0 && locs[prevNonNilPathTypeLoc].PathType == nil { - prevNonNilPathTypeLoc-- - } - if *locs[prevNonNilPathTypeLoc].PathType == *pathTypeStr { - pathTypeRowSpan = 0 - pathTypeStr = nil - locs[prevNonNilPathTypeLoc].PathTypeRowSpan++ - // only if we have extended path type we may need to extend file type - - prevNonNilFileTypeLoc := i - 1 - for prevNonNilFileTypeLoc > 0 && locs[prevNonNilFileTypeLoc].FileType == nil { - prevNonNilFileTypeLoc-- - } - if *locs[prevNonNilFileTypeLoc].FileType == *fileTypeStr { - fileTypeRowSpan = 0 - fileTypeStr = nil - locs[prevNonNilFileTypeLoc].FileTypeRowSpan++ - } - } - } - - locTable := locationTable{ - PathType: pathTypeStr, - PathTypeRowSpan: pathTypeRowSpan, - FileType: fileTypeStr, - FileTypeRowSpan: fileTypeRowSpan, - Locations: []fileLocations{fLoc}, - } - - locs = append(locs, locTable) - - } - - // TaskIDs - taskIDs := map[int64]struct{}{} - var htasks []taskSummary - { - // get non-nil task IDs - appendNonNil := func(id *int64) { - if id != nil { - taskIDs[*id] = struct{}{} - } - } - appendNonNil(task.TaskSDR) - appendNonNil(task.TaskTreeD) - 
appendNonNil(task.TaskTreeC) - appendNonNil(task.TaskTreeR) - appendNonNil(task.TaskPrecommitMsg) - appendNonNil(task.TaskPoRep) - appendNonNil(task.TaskFinalize) - appendNonNil(task.TaskMoveStorage) - appendNonNil(task.TaskCommitMsg) - - if len(taskIDs) > 0 { - ids := lo.Keys(taskIDs) - - var dbtasks []struct { - OwnerID *string `db:"owner_id"` - HostAndPort *string `db:"host_and_port"` - TaskID int64 `db:"id"` - Name string `db:"name"` - UpdateTime time.Time `db:"update_time"` - } - err = a.db.Select(ctx, &dbtasks, `SELECT t.owner_id, hm.host_and_port, t.id, t.name, t.update_time FROM harmony_task t LEFT JOIN curio.harmony_machines hm ON hm.id = t.owner_id WHERE t.id = ANY($1)`, ids) - if err != nil { - http.Error(w, fmt.Sprintf("failed to fetch task names: %v", err), http.StatusInternalServerError) - return - } - - for _, tn := range dbtasks { - htasks = append(htasks, taskSummary{ - Name: tn.Name, - SincePosted: time.Since(tn.UpdateTime.Local()).Round(time.Second).String(), - Owner: tn.HostAndPort, - OwnerID: tn.OwnerID, - ID: tn.TaskID, - }) - } - } - } - - mi := struct { - SectorNumber int64 - PipelinePoRep sectorListEntry - - Locations []locationTable - Tasks []taskSummary - }{ - SectorNumber: intid, - PipelinePoRep: sectorListEntry{ - PipelineTask: tasks[0], - AfterSeed: afterSeed, - - ChainAlloc: must.One(mbf.alloc.IsSet(uint64(task.SectorNumber))), - ChainSector: must.One(mbf.sectorSet.IsSet(uint64(task.SectorNumber))), - ChainActive: must.One(mbf.active.IsSet(uint64(task.SectorNumber))), - ChainUnproven: must.One(mbf.unproven.IsSet(uint64(task.SectorNumber))), - ChainFaulty: must.One(mbf.faulty.IsSet(uint64(task.SectorNumber))), - }, - - Locations: locs, - Tasks: htasks, - } - - a.executePageTemplate(w, "sector_info", "Sector Info", mi) -} - -var templateDev = os.Getenv("CURIO_WEB_DEV") == "1" - -func (a *app) executeTemplate(w http.ResponseWriter, name string, data interface{}) { - if templateDev { - fs := os.DirFS("./curiosrc/web/hapi/web") - a.t = 
template.Must(makeTemplate().ParseFS(fs, "*")) - } - if err := a.t.ExecuteTemplate(w, name, data); err != nil { - log.Errorf("execute template %s: %v", name, err) - http.Error(w, "internal server error", http.StatusInternalServerError) - } -} - -func (a *app) executePageTemplate(w http.ResponseWriter, name, title string, data interface{}) { - if templateDev { - fs := os.DirFS("./curiosrc/web/hapi/web") - a.t = template.Must(makeTemplate().ParseFS(fs, "*")) - } - var contentBuf bytes.Buffer - if err := a.t.ExecuteTemplate(&contentBuf, name, data); err != nil { - log.Errorf("execute template %s: %v", name, err) - http.Error(w, "internal server error", http.StatusInternalServerError) - } - a.executeTemplate(w, "root", map[string]interface{}{ - "PageTitle": title, - "Content": contentBuf.String(), - }) -} - -type machineRecentTask struct { - TaskName string - Success int64 - Fail int64 -} - -type machineSummary struct { - Address string - ID int64 - SinceContact string - - RecentTasks []*machineRecentTask -} - -type taskSummary struct { - Name string - SincePosted string - Owner, OwnerID *string - ID int64 -} - -type taskHistorySummary struct { - Name string - TaskID int64 - - Posted, Start, Queued, Took string - - Result bool - Err string - - CompletedBy string -} - -func (a *app) clusterMachineSummary(ctx context.Context) ([]machineSummary, error) { - // First get task summary for tasks completed in the last 24 hours - // NOTE: This query uses harmony_task_history_work_index, task history may get big - tsrows, err := a.db.Query(ctx, `SELECT hist.completed_by_host_and_port, hist.name, hist.result, count(1) FROM harmony_task_history hist - WHERE hist.work_end > now() - INTERVAL '1 day' - GROUP BY hist.completed_by_host_and_port, hist.name, hist.result - ORDER BY completed_by_host_and_port ASC`) - if err != nil { - return nil, err - } - defer tsrows.Close() - - // Map of machine -> task -> recent task - taskSummaries := map[string]map[string]*machineRecentTask{} - - for 
tsrows.Next() { - var taskName string - var result bool - var count int64 - var machine string - - if err := tsrows.Scan(&machine, &taskName, &result, &count); err != nil { - return nil, err - } - - if _, ok := taskSummaries[machine]; !ok { - taskSummaries[machine] = map[string]*machineRecentTask{} - } - - if _, ok := taskSummaries[machine][taskName]; !ok { - taskSummaries[machine][taskName] = &machineRecentTask{TaskName: taskName} - } - - if result { - taskSummaries[machine][taskName].Success = count - } else { - taskSummaries[machine][taskName].Fail = count - } - } - - // Then machine summary - rows, err := a.db.Query(ctx, "SELECT id, host_and_port, last_contact FROM harmony_machines order by host_and_port asc") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []machineSummary - for rows.Next() { - var m machineSummary - var lastContact time.Time - - if err := rows.Scan(&m.ID, &m.Address, &lastContact); err != nil { - return nil, err // Handle error - } - - m.SinceContact = time.Since(lastContact).Round(time.Second).String() - - // Add recent tasks - if ts, ok := taskSummaries[m.Address]; ok { - for _, t := range ts { - m.RecentTasks = append(m.RecentTasks, t) - } - sort.Slice(m.RecentTasks, func(i, j int) bool { - return m.RecentTasks[i].TaskName < m.RecentTasks[j].TaskName - }) - } - - summaries = append(summaries, m) - } - return summaries, nil -} - -func (a *app) clusterTaskSummary(ctx context.Context) ([]taskSummary, error) { - rows, err := a.db.Query(ctx, "SELECT t.id, t.name, t.update_time, t.owner_id, hm.host_and_port FROM harmony_task t LEFT JOIN curio.harmony_machines hm ON hm.id = t.owner_id ORDER BY t.update_time ASC, t.owner_id") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []taskSummary - for rows.Next() { - var t taskSummary - var posted time.Time - - if err := rows.Scan(&t.ID, &t.Name, &posted, &t.OwnerID, &t.Owner); err != nil { - return nil, err // 
Handle error - } - - t.SincePosted = time.Since(posted).Round(time.Second).String() - - summaries = append(summaries, t) - } - return summaries, nil -} - -func (a *app) clusterTaskHistorySummary(ctx context.Context) ([]taskHistorySummary, error) { - rows, err := a.db.Query(ctx, "SELECT id, name, task_id, posted, work_start, work_end, result, err, completed_by_host_and_port FROM harmony_task_history ORDER BY work_end DESC LIMIT 15") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []taskHistorySummary - for rows.Next() { - var t taskHistorySummary - var posted, start, end time.Time - - if err := rows.Scan(&t.TaskID, &t.Name, &t.TaskID, &posted, &start, &end, &t.Result, &t.Err, &t.CompletedBy); err != nil { - return nil, err // Handle error - } - - t.Posted = posted.Local().Round(time.Second).Format("02 Jan 06 15:04") - t.Start = start.Local().Round(time.Second).Format("02 Jan 06 15:04") - //t.End = end.Local().Round(time.Second).Format("02 Jan 06 15:04") - - t.Queued = start.Sub(posted).Round(time.Second).String() - if t.Queued == "0s" { - t.Queued = start.Sub(posted).Round(time.Millisecond).String() - } - - t.Took = end.Sub(start).Round(time.Second).String() - if t.Took == "0s" { - t.Took = end.Sub(start).Round(time.Millisecond).String() - } - - summaries = append(summaries, t) - } - return summaries, nil -} - -type porepPipelineSummary struct { - Actor string - - CountSDR int - CountTrees int - CountPrecommitMsg int - CountWaitSeed int - CountPoRep int - CountCommitMsg int - CountDone int - CountFailed int -} - -func (a *app) porepPipelineSummary(ctx context.Context) ([]porepPipelineSummary, error) { - rows, err := a.db.Query(ctx, ` - SELECT - sp_id, - COUNT(*) FILTER (WHERE after_sdr = false) as CountSDR, - COUNT(*) FILTER (WHERE (after_tree_d = false OR after_tree_c = false OR after_tree_r = false) AND after_sdr = true) as CountTrees, - COUNT(*) FILTER (WHERE after_tree_r = true and after_precommit_msg = false) as 
CountPrecommitMsg, - COUNT(*) FILTER (WHERE after_precommit_msg_success = false AND after_precommit_msg = true) as CountWaitSeed, - COUNT(*) FILTER (WHERE after_porep = false AND after_precommit_msg_success = true) as CountPoRep, - COUNT(*) FILTER (WHERE after_commit_msg_success = false AND after_porep = true) as CountCommitMsg, - COUNT(*) FILTER (WHERE after_commit_msg_success = true) as CountDone, - COUNT(*) FILTER (WHERE failed = true) as CountFailed - FROM - sectors_sdr_pipeline - GROUP BY sp_id`) - if err != nil { - return nil, xerrors.Errorf("query: %w", err) - } - defer rows.Close() - - var summaries []porepPipelineSummary - for rows.Next() { - var summary porepPipelineSummary - if err := rows.Scan(&summary.Actor, &summary.CountSDR, &summary.CountTrees, &summary.CountPrecommitMsg, &summary.CountWaitSeed, &summary.CountPoRep, &summary.CountCommitMsg, &summary.CountDone, &summary.CountFailed); err != nil { - return nil, xerrors.Errorf("scan: %w", err) - } - summary.Actor = "f0" + summary.Actor - - summaries = append(summaries, summary) - } - return summaries, nil -} - -type machineInfo struct { - Info struct { - Host string - ID int64 - LastContact string - CPU int64 - Memory int64 - GPU int64 - } - - // Storage - Storage []struct { - ID string - Weight int64 - MaxStorage int64 - CanSeal bool - CanStore bool - Groups string - AllowTo string - AllowTypes string - DenyTypes string - Capacity int64 - Available int64 - FSAvailable int64 - Reserved int64 - Used int64 - AllowMiners string - DenyMiners string - LastHeartbeat time.Time - HeartbeatErr *string - - UsedPercent float64 - ReservedPercent float64 - } - - /*TotalStorage struct { - MaxStorage int64 - UsedStorage int64 - - MaxSealStorage int64 - UsedSealStorage int64 - - MaxStoreStorage int64 - UsedStoreStorage int64 - }*/ - - // Tasks - RunningTasks []struct { - ID int64 - Task string - Posted string - - PoRepSector, PoRepSectorSP *int64 - } - - FinishedTasks []struct { - ID int64 - Task string - Posted 
string - Start string - Queued string - Took string - Outcome string - Message string - } -} - -func (a *app) clusterNodeInfo(ctx context.Context, id int64) (*machineInfo, error) { - rows, err := a.db.Query(ctx, "SELECT id, host_and_port, last_contact, cpu, ram, gpu FROM harmony_machines WHERE id=$1 ORDER BY host_and_port ASC", id) - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []machineInfo - if rows.Next() { - var m machineInfo - var lastContact time.Time - - if err := rows.Scan(&m.Info.ID, &m.Info.Host, &lastContact, &m.Info.CPU, &m.Info.Memory, &m.Info.GPU); err != nil { - return nil, err - } - - m.Info.LastContact = time.Since(lastContact).Round(time.Second).String() - - summaries = append(summaries, m) - } - - if len(summaries) == 0 { - return nil, xerrors.Errorf("machine not found") - } - - // query storage info - rows2, err := a.db.Query(ctx, "SELECT storage_id, weight, max_storage, can_seal, can_store, groups, allow_to, allow_types, deny_types, capacity, available, fs_available, reserved, used, allow_miners, deny_miners, last_heartbeat, heartbeat_err FROM storage_path WHERE urls LIKE '%' || $1 || '%'", summaries[0].Info.Host) - if err != nil { - return nil, err - } - - defer rows2.Close() - - for rows2.Next() { - var s struct { - ID string - Weight int64 - MaxStorage int64 - CanSeal bool - CanStore bool - Groups string - AllowTo string - AllowTypes string - DenyTypes string - Capacity int64 - Available int64 - FSAvailable int64 - Reserved int64 - Used int64 - AllowMiners string - DenyMiners string - LastHeartbeat time.Time - HeartbeatErr *string - - UsedPercent float64 - ReservedPercent float64 - } - if err := rows2.Scan(&s.ID, &s.Weight, &s.MaxStorage, &s.CanSeal, &s.CanStore, &s.Groups, &s.AllowTo, &s.AllowTypes, &s.DenyTypes, &s.Capacity, &s.Available, &s.FSAvailable, &s.Reserved, &s.Used, &s.AllowMiners, &s.DenyMiners, &s.LastHeartbeat, &s.HeartbeatErr); err != nil { - return nil, err - } - - 
s.UsedPercent = float64(s.Capacity-s.FSAvailable) * 100 / float64(s.Capacity) - s.ReservedPercent = float64(s.Capacity-(s.FSAvailable+s.Reserved))*100/float64(s.Capacity) - s.UsedPercent - - summaries[0].Storage = append(summaries[0].Storage, s) - } - - // tasks - rows3, err := a.db.Query(ctx, "SELECT id, name, posted_time FROM harmony_task WHERE owner_id=$1", summaries[0].Info.ID) - if err != nil { - return nil, err - } - - defer rows3.Close() - - for rows3.Next() { - var t struct { - ID int64 - Task string - Posted string - - PoRepSector *int64 - PoRepSectorSP *int64 - } - - var posted time.Time - if err := rows3.Scan(&t.ID, &t.Task, &posted); err != nil { - return nil, err - } - t.Posted = time.Since(posted).Round(time.Second).String() - - { - // try to find in the porep pipeline - rows4, err := a.db.Query(ctx, `SELECT sp_id, sector_number FROM sectors_sdr_pipeline - WHERE task_id_sdr=$1 - OR task_id_tree_d=$1 - OR task_id_tree_c=$1 - OR task_id_tree_r=$1 - OR task_id_precommit_msg=$1 - OR task_id_porep=$1 - OR task_id_commit_msg=$1 - OR task_id_finalize=$1 - OR task_id_move_storage=$1 - `, t.ID) - if err != nil { - return nil, err - } - - if rows4.Next() { - var spid int64 - var sector int64 - if err := rows4.Scan(&spid, §or); err != nil { - return nil, err - } - t.PoRepSector = §or - t.PoRepSectorSP = &spid - } - - rows4.Close() - } - - summaries[0].RunningTasks = append(summaries[0].RunningTasks, t) - } - - rows5, err := a.db.Query(ctx, `SELECT name, task_id, posted, work_start, work_end, result, err FROM harmony_task_history WHERE completed_by_host_and_port = $1 ORDER BY work_end DESC LIMIT 15`, summaries[0].Info.Host) - if err != nil { - return nil, err - } - defer rows5.Close() - - for rows5.Next() { - var ft struct { - ID int64 - Task string - Posted string - Start string - Queued string - Took string - Outcome string - - Message string - } - - var posted, start, end time.Time - var result bool - if err := rows5.Scan(&ft.Task, &ft.ID, &posted, &start, 
&end, &result, &ft.Message); err != nil { - return nil, err - } - - ft.Outcome = "Success" - if !result { - ft.Outcome = "Failed" - } - - // Format the times and durations - ft.Posted = posted.Format("02 Jan 06 15:04 MST") - ft.Start = start.Format("02 Jan 06 15:04 MST") - ft.Queued = fmt.Sprintf("%s", start.Sub(posted).Round(time.Second).String()) - ft.Took = fmt.Sprintf("%s", end.Sub(start).Round(time.Second)) - - summaries[0].FinishedTasks = append(summaries[0].FinishedTasks, ft) - } - - return &summaries[0], nil -} diff --git a/curiosrc/web/hapi/simpleinfo_pipeline_porep.go b/curiosrc/web/hapi/simpleinfo_pipeline_porep.go deleted file mode 100644 index a37cd14ab28..00000000000 --- a/curiosrc/web/hapi/simpleinfo_pipeline_porep.go +++ /dev/null @@ -1,195 +0,0 @@ -package hapi - -import ( - "context" - "net/http" - "time" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/must" -) - -type PipelineTask struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - - CreateTime time.Time `db:"create_time"` - - TaskSDR *int64 `db:"task_id_sdr"` - AfterSDR bool `db:"after_sdr"` - - TaskTreeD *int64 `db:"task_id_tree_d"` - AfterTreeD bool `db:"after_tree_d"` - - TaskTreeC *int64 `db:"task_id_tree_c"` - AfterTreeC bool `db:"after_tree_c"` - - TaskTreeR *int64 `db:"task_id_tree_r"` - AfterTreeR bool `db:"after_tree_r"` - - TaskPrecommitMsg *int64 `db:"task_id_precommit_msg"` - AfterPrecommitMsg bool `db:"after_precommit_msg"` - - AfterPrecommitMsgSuccess bool `db:"after_precommit_msg_success"` - SeedEpoch *int64 `db:"seed_epoch"` - - TaskPoRep *int64 `db:"task_id_porep"` - PoRepProof []byte `db:"porep_proof"` - AfterPoRep bool `db:"after_porep"` - - TaskFinalize *int64 
`db:"task_id_finalize"` - AfterFinalize bool `db:"after_finalize"` - - TaskMoveStorage *int64 `db:"task_id_move_storage"` - AfterMoveStorage bool `db:"after_move_storage"` - - TaskCommitMsg *int64 `db:"task_id_commit_msg"` - AfterCommitMsg bool `db:"after_commit_msg"` - - AfterCommitMsgSuccess bool `db:"after_commit_msg_success"` - - Failed bool `db:"failed"` - FailedReason string `db:"failed_reason"` -} - -type sectorListEntry struct { - PipelineTask - - Address address.Address - CreateTime string - AfterSeed bool - - ChainAlloc, ChainSector, ChainActive, ChainUnproven, ChainFaulty bool -} - -type minerBitfields struct { - alloc, sectorSet, active, unproven, faulty bitfield.BitField -} - -func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - var tasks []PipelineTask - - err := a.db.Select(ctx, &tasks, `SELECT - sp_id, sector_number, - create_time, - task_id_sdr, after_sdr, - task_id_tree_d, after_tree_d, - task_id_tree_c, after_tree_c, - task_id_tree_r, after_tree_r, - task_id_precommit_msg, after_precommit_msg, - after_precommit_msg_success, seed_epoch, - task_id_porep, porep_proof, after_porep, - task_id_finalize, after_finalize, - task_id_move_storage, after_move_storage, - task_id_commit_msg, after_commit_msg, - after_commit_msg_success, - failed, failed_reason - FROM sectors_sdr_pipeline order by sp_id, sector_number`) // todo where constrain list - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch pipeline tasks: %w", err).Error(), http.StatusInternalServerError) - return - } - - head, err := a.workingApi.ChainHead(ctx) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch chain head: %w", err).Error(), http.StatusInternalServerError) - return - } - epoch := head.Height() - - minerBitfieldCache := map[address.Address]minerBitfields{} - - sectorList := make([]sectorListEntry, 0, len(tasks)) - for _, task := range tasks { - task := task - - task.CreateTime = task.CreateTime.Local() - - 
addr, err := address.NewIDAddress(uint64(task.SpID)) - if err != nil { - http.Error(w, xerrors.Errorf("failed to create actor address: %w", err).Error(), http.StatusInternalServerError) - return - } - - mbf, ok := minerBitfieldCache[addr] - if !ok { - mbf, err := a.getMinerBitfields(ctx, addr, a.stor) - if err != nil { - http.Error(w, xerrors.Errorf("failed to load miner bitfields: %w", err).Error(), http.StatusInternalServerError) - return - } - minerBitfieldCache[addr] = mbf - } - - afterSeed := task.SeedEpoch != nil && *task.SeedEpoch <= int64(epoch) - - sectorList = append(sectorList, sectorListEntry{ - PipelineTask: task, - Address: addr, - CreateTime: task.CreateTime.Format(time.DateTime), - AfterSeed: afterSeed, - - ChainAlloc: must.One(mbf.alloc.IsSet(uint64(task.SectorNumber))), - ChainSector: must.One(mbf.sectorSet.IsSet(uint64(task.SectorNumber))), - ChainActive: must.One(mbf.active.IsSet(uint64(task.SectorNumber))), - ChainUnproven: must.One(mbf.unproven.IsSet(uint64(task.SectorNumber))), - ChainFaulty: must.One(mbf.faulty.IsSet(uint64(task.SectorNumber))), - }) - } - - a.executeTemplate(w, "pipeline_porep_sectors", sectorList) -} - -func (a *app) getMinerBitfields(ctx context.Context, addr address.Address, stor adt.Store) (minerBitfields, error) { - act, err := a.workingApi.StateGetActor(ctx, addr, types.EmptyTSK) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load actor: %w", err) - } - - mas, err := miner.Load(stor, act) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load miner actor: %w", err) - } - - activeSectors, err := miner.AllPartSectors(mas, miner.Partition.ActiveSectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load active sectors: %w", err) - } - - allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load all sectors: %w", err) - } - - unproved, err := 
miner.AllPartSectors(mas, miner.Partition.UnprovenSectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load unproven sectors: %w", err) - } - - faulty, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load faulty sectors: %w", err) - } - - alloc, err := mas.GetAllocatedSectors() - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load allocated sectors: %w", err) - } - - return minerBitfields{ - alloc: *alloc, - sectorSet: allSectors, - active: activeSectors, - unproven: unproved, - faulty: faulty, - }, nil -} diff --git a/curiosrc/web/hapi/watch_actor.go b/curiosrc/web/hapi/watch_actor.go deleted file mode 100644 index 51e1f51e74d..00000000000 --- a/curiosrc/web/hapi/watch_actor.go +++ /dev/null @@ -1,286 +0,0 @@ -package hapi - -import ( - "context" - "sort" - "time" - - "github.com/BurntSushi/toml" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" -) - -const watchInterval = time.Second * 10 - -func (a *app) watchActor() { - ticker := time.NewTicker(watchInterval) - for { - err := a.updateActor(context.TODO()) - if err != nil { - log.Errorw("updating rpc info", "error", err) - } - select { - case <-ticker.C: - } - } -} - -type minimalActorInfo struct { - Addresses []struct { - MinerAddresses []string - } -} - -var startedAt = time.Now() - -func (a *app) updateActor(ctx context.Context) error { - a.rpcInfoLk.Lock() - api := a.workingApi - a.rpcInfoLk.Unlock() - - stor := store.ActorStore(ctx, blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), ChainBlockCache)) - - if api == nil { - if time.Since(startedAt) > time.Second*10 { - log.Warnw("no working api yet") - } - return nil 
- } - - var actorInfos []actorInfo - - confNameToAddr := map[address.Address][]string{} // address -> config names - - err := forEachConfig[minimalActorInfo](a, func(name string, info minimalActorInfo) error { - for _, aset := range info.Addresses { - for _, addr := range aset.MinerAddresses { - a, err := address.NewFromString(addr) - if err != nil { - return xerrors.Errorf("parsing address: %w", err) - } - confNameToAddr[a] = append(confNameToAddr[a], name) - } - } - - return nil - }) - if err != nil { - return err - } - - wins, err := a.spWins(ctx) - if err != nil { - return xerrors.Errorf("getting sp wins: %w", err) - } - - for addr, cnames := range confNameToAddr { - p, err := api.StateMinerPower(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner power: %w", err) - } - - dls, err := api.StateMinerDeadlines(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting deadlines: %w", err) - } - - mact, err := api.StateGetActor(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting actor: %w", err) - } - - mas, err := miner.Load(stor, mact) - if err != nil { - return err - } - - outDls := []actorDeadline{} - - for dlidx := range dls { - p, err := api.StateMinerPartitions(ctx, addr, uint64(dlidx), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting partition: %w", err) - } - - dl := actorDeadline{ - Empty: false, - Current: false, // todo - Proven: false, - PartFaulty: false, - Faulty: false, - } - - var live, faulty uint64 - - for _, part := range p { - l, err := part.LiveSectors.Count() - if err != nil { - return xerrors.Errorf("getting live sectors: %w", err) - } - live += l - - f, err := part.FaultySectors.Count() - if err != nil { - return xerrors.Errorf("getting faulty sectors: %w", err) - } - faulty += f - } - - dl.Empty = live == 0 - dl.Proven = live > 0 && faulty == 0 - dl.PartFaulty = faulty > 0 - dl.Faulty = faulty > 0 && faulty == live - - outDls = append(outDls, dl) 
- } - - pd, err := api.StateMinerProvingDeadline(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting proving deadline: %w", err) - } - - if len(outDls) != 48 { - return xerrors.Errorf("expected 48 deadlines, got %d", len(outDls)) - } - - outDls[pd.Index].Current = true - - avail, err := mas.AvailableBalance(mact.Balance) - if err != nil { - return xerrors.Errorf("getting available balance: %w", err) - } - - mi, err := mas.Info() - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - wbal, err := api.WalletBalance(ctx, mi.Worker) - if err != nil { - return xerrors.Errorf("getting worker balance: %w", err) - } - - sort.Strings(cnames) - - actorInfos = append(actorInfos, actorInfo{ - Address: addr.String(), - CLayers: cnames, - QualityAdjustedPower: types.DeciStr(p.MinerPower.QualityAdjPower), - RawBytePower: types.DeciStr(p.MinerPower.RawBytePower), - Deadlines: outDls, - - ActorBalance: types.FIL(mact.Balance).Short(), - ActorAvailable: types.FIL(avail).Short(), - WorkerBalance: types.FIL(wbal).Short(), - - Win1: wins[addr].Win1, // note: zero values are fine here - Win7: wins[addr].Win7, - Win30: wins[addr].Win30, - }) - } - - sort.Slice(actorInfos, func(i, j int) bool { - return actorInfos[i].Address < actorInfos[j].Address - }) - - a.actorInfoLk.Lock() - a.actorInfos = actorInfos - a.actorInfoLk.Unlock() - - return nil -} - -func (a *app) loadConfigs(ctx context.Context) (map[string]string, error) { - rows, err := a.db.Query(ctx, `SELECT title, config FROM harmony_config`) - if err != nil { - return nil, xerrors.Errorf("getting db configs: %w", err) - } - - configs := make(map[string]string) - for rows.Next() { - var title, config string - if err := rows.Scan(&title, &config); err != nil { - return nil, xerrors.Errorf("scanning db configs: %w", err) - } - configs[title] = config - } - - return configs, nil -} - -type wins struct { - SpID int64 `db:"sp_id"` - Win1 int64 `db:"win1"` - Win7 int64 `db:"win7"` - 
Win30 int64 `db:"win30"` -} - -func (a *app) spWins(ctx context.Context) (map[address.Address]wins, error) { - var w []wins - - // note: this query uses mining_tasks_won_sp_id_base_compute_time_index - err := a.db.Select(ctx, &w, `WITH wins AS ( - SELECT - sp_id, - base_compute_time, - won - FROM - mining_tasks - WHERE - won = true - AND base_compute_time > NOW() - INTERVAL '30 days' - ) - - SELECT - sp_id, - COUNT(*) FILTER (WHERE base_compute_time > NOW() - INTERVAL '1 day') AS "win1", - COUNT(*) FILTER (WHERE base_compute_time > NOW() - INTERVAL '7 days') AS "win7", - COUNT(*) FILTER (WHERE base_compute_time > NOW() - INTERVAL '30 days') AS "win30" - FROM - wins - GROUP BY - sp_id - ORDER BY - sp_id`) - if err != nil { - return nil, xerrors.Errorf("query win counts: %w", err) - } - - wm := make(map[address.Address]wins) - for _, wi := range w { - ma, err := address.NewIDAddress(uint64(wi.SpID)) - if err != nil { - return nil, xerrors.Errorf("parsing miner address: %w", err) - } - - wm[ma] = wi - } - - return wm, nil -} - -func forEachConfig[T any](a *app, cb func(name string, v T) error) error { - confs, err := a.loadConfigs(context.Background()) - if err != nil { - return err - } - - for name, tomlStr := range confs { - var info T - if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil { - return xerrors.Errorf("unmarshaling %s config: %w", name, err) - } - - if err := cb(name, info); err != nil { - return xerrors.Errorf("cb: %w", err) - } - } - - return nil -} diff --git a/curiosrc/web/hapi/web/actor_summary.gohtml b/curiosrc/web/hapi/web/actor_summary.gohtml deleted file mode 100644 index bf577d802e0..00000000000 --- a/curiosrc/web/hapi/web/actor_summary.gohtml +++ /dev/null @@ -1,30 +0,0 @@ -{{define "actor_summary"}} -{{range .}} - - {{.Address}} - - {{range .CLayers}} - {{.}} - {{end}} - - {{.QualityAdjustedPower}} - -
- {{range .Deadlines}} -
- {{end}} -
- - {{.ActorBalance}} - {{.ActorAvailable}} - {{.WorkerBalance}} - - - - - -
1day:  {{.Win1}}
7day:  {{.Win7}}
30day: {{.Win30}}
- - -{{end}} -{{end}} \ No newline at end of file diff --git a/curiosrc/web/hapi/web/chain_rpcs.gohtml b/curiosrc/web/hapi/web/chain_rpcs.gohtml deleted file mode 100644 index 5705da39517..00000000000 --- a/curiosrc/web/hapi/web/chain_rpcs.gohtml +++ /dev/null @@ -1,15 +0,0 @@ -{{define "chain_rpcs"}} -{{range .}} - - {{.Address}} - - {{range .CLayers}} - {{.}} - {{end}} - - {{if .Reachable}}ok{{else}}FAIL{{end}} - {{if eq "ok" .SyncState}}ok{{else}}{{.SyncState}}{{end}} - {{.Version}} - -{{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/cluster_machines.gohtml b/curiosrc/web/hapi/web/cluster_machines.gohtml deleted file mode 100644 index 5fb18b52c24..00000000000 --- a/curiosrc/web/hapi/web/cluster_machines.gohtml +++ /dev/null @@ -1,12 +0,0 @@ -{{define "cluster_machines"}} -{{range .}} - - {{.Address}} - {{.ID}} - {{.SinceContact}} - {{range .RecentTasks}} - {{.TaskName}}:{{.Success}}{{if ne 0 .Fail}}({{.Fail}}){{end}} - {{end}} - -{{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/cluster_task_history.gohtml b/curiosrc/web/hapi/web/cluster_task_history.gohtml deleted file mode 100644 index f95dbb2b26b..00000000000 --- a/curiosrc/web/hapi/web/cluster_task_history.gohtml +++ /dev/null @@ -1,19 +0,0 @@ -{{define "cluster_task_history"}} - {{range .}} - - {{.Name}} - {{.TaskID}} - {{.CompletedBy}} - {{.Posted}} - {{.Start}} - {{.Queued}} - {{.Took}} - {{if .Result}}success{{else}}error{{end}} - -
- {{.Err}} -
- - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/cluster_tasks.gohtml b/curiosrc/web/hapi/web/cluster_tasks.gohtml deleted file mode 100644 index b7b3faec0ef..00000000000 --- a/curiosrc/web/hapi/web/cluster_tasks.gohtml +++ /dev/null @@ -1,10 +0,0 @@ -{{define "cluster_tasks"}} - {{range .}} - - {{.Name}} - {{.ID}} - {{.SincePosted}} - {{if ne nil .OwnerID}}{{.Owner}}{{end}} - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/node_info.gohtml b/curiosrc/web/hapi/web/node_info.gohtml deleted file mode 100644 index 16f60a47522..00000000000 --- a/curiosrc/web/hapi/web/node_info.gohtml +++ /dev/null @@ -1,100 +0,0 @@ -{{define "node_info"}} -

Info

- - - - - - - - - - - - - - - - - - - -
HostIDLast ContactCPUMemoryGPUDebug
{{.Info.Host}}{{.Info.ID}}{{.Info.LastContact}}{{.Info.CPU}}{{toHumanBytes .Info.Memory}}{{.Info.GPU}}[pprof]
-
-

Storage

- - - - - - - - - - {{range .Storage}} - - - - - - - - - {{end}} - -
IDTypeCapacityAvailableReserved
{{.ID}} - {{if and (not .CanSeal) (not .CanStore)}}ReadOnly{{end}} - {{if and (.CanSeal) (not .CanStore)}}Seal{{end}} - {{if and (not .CanSeal) (.CanStore)}}Store{{end}} - {{if and (.CanSeal) (.CanStore)}}Seal+Store{{end}} - {{toHumanBytes .Capacity}}{{toHumanBytes .Available}}{{toHumanBytes .Reserved}} -
-
-
-
-
-
-

Tasks

-

Running

- - - - - - - - {{range .RunningTasks}} - - - - - - - {{end}} -
IDTaskPostedSector
{{.ID}}{{.Task}}{{.Posted}}{{if ne nil .PoRepSector}}f0{{.PoRepSectorSP}}:{{.PoRepSector}}{{end}}
-

Recently Finished

- - - - - - - - - - - - {{range .FinishedTasks}} - - - - - - - - - - - {{end}} -
IDTaskPostedStartQueuedTookOutcomeMessage
{{.ID}}{{.Task}}{{.Posted}}{{.Start}}{{.Queued}}{{.Took}}{{.Outcome}}{{.Message}}
-{{end}} diff --git a/curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml b/curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml deleted file mode 100644 index 82f0ad19671..00000000000 --- a/curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml +++ /dev/null @@ -1,200 +0,0 @@ -{{define "sector_porep_state"}} - - - - - - - - - - - - - - - - - - - - - - - - - -
-
SDR
-
- {{if .AfterSDR}}done{{else}} - {{if ne .TaskSDR nil}}T:{{.TaskSDR}}{{else}}--{{end}} - {{end}} -
-
-
TreeC
-
- {{if .AfterTreeC}}done{{else}} - {{if ne .TaskTreeC nil}}T:{{.TaskTreeC}}{{else}}--{{end}} - {{end}} -
-
-
PComm Msg
-
- {{if .AfterPrecommitMsg}}done{{else}} - {{if ne .TaskPrecommitMsg nil}}T:{{.TaskPrecommitMsg}}{{else}}--{{end}} - {{end}} -
-
-
PComm Wait
-
- {{if .AfterPrecommitMsgSuccess}}done{{else}} - -- - {{end}} -
-
-
Wait Seed
-
- {{if .AfterSeed}}done{{else}} - {{if ne .SeedEpoch nil}}@{{.SeedEpoch}}{{else}}--{{end}} - {{end}} -
-
-
PoRep
-
- {{if .AfterPoRep}}done{{else}} - {{if ne .TaskPoRep nil}}T:{{.TaskPoRep}}{{else}}--{{end}} - {{end}} -
-
-
Clear Cache
-
- {{if .AfterFinalize}}done{{else}} - {{if ne .TaskFinalize nil}}T:{{.TaskFinalize}}{{else}}--{{end}} - {{end}} -
-
-
Move Storage
-
- {{if .AfterMoveStorage}}done{{else}} - {{if ne .TaskMoveStorage nil}}T:{{.TaskMoveStorage}}{{else}}--{{end}} - {{end}} -
-
-
On Chain
-
{{if .ChainSector}}yes{{else}}{{if .ChainAlloc}}allocated{{else}}no{{end}}{{end}}
-
-
TreeD
-
- {{if .AfterTreeD}}done{{else}} - {{if ne .TaskTreeD nil}}T:{{.TaskTreeD}}{{else}}--{{end}} - {{end}} -
-
-
TreeR
-
- {{if .AfterTreeR}}done{{else}} - {{if ne .TaskTreeR nil}}T:{{.TaskTreeR}}{{else}}--{{end}} - {{end}} -
-
-
Commit Msg
-
- {{if .AfterCommitMsg}}done{{else}} - {{if ne .TaskCommitMsg nil}}T:{{.TaskCommitMsg}}{{else}}--{{end}} - {{end}} -
-
-
Commit Wait
-
- {{if .AfterCommitMsgSuccess}}done{{else}} - -- - {{end}} -
-
-
Active
-
{{if .ChainActive}}yes{{else}} - {{if .ChainUnproven}}unproven{{else}} - {{if .ChainFaulty}}faulty{{else}}no{{end}} - {{end}} - {{end}} -
-
-{{end}} - -{{define "pipeline_porep_sectors"}} - {{range .}} - - {{.Address}} - {{.CreateTime}} - - {{template "sector_porep_state" .}} - - - DETAILS - - - - - {{.SectorNumber}} - - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/pipline_porep.gohtml b/curiosrc/web/hapi/web/pipline_porep.gohtml deleted file mode 100644 index 5e7c7f7c63e..00000000000 --- a/curiosrc/web/hapi/web/pipline_porep.gohtml +++ /dev/null @@ -1,15 +0,0 @@ -{{define "pipeline_porep"}} - {{range .}} - - {{.Actor}} - {{.CountSDR}} - {{.CountTrees}} - {{.CountPrecommitMsg}} - {{.CountWaitSeed}} - {{.CountPoRep}} - {{.CountCommitMsg}} - {{.CountDone}} - {{.CountFailed}} - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/root.gohtml b/curiosrc/web/hapi/web/root.gohtml deleted file mode 100644 index 114db6462c8..00000000000 --- a/curiosrc/web/hapi/web/root.gohtml +++ /dev/null @@ -1,26 +0,0 @@ -{{define "root"}} - - - {{.PageTitle}} - - - - - - - - - -
-
-

{{.PageTitle}}

-
-
-
-
- {{.Content}} -
-
- - -{{end}} diff --git a/curiosrc/web/hapi/web/sector_info.gohtml b/curiosrc/web/hapi/web/sector_info.gohtml deleted file mode 100644 index afa96a9234a..00000000000 --- a/curiosrc/web/hapi/web/sector_info.gohtml +++ /dev/null @@ -1,57 +0,0 @@ -{{define "sector_info"}} -

Sector {{.SectorNumber}}

-
-

PoRep Pipeline

- {{template "sector_porep_state" .PipelinePoRep}} -
-
-

Storage

- - - - - - - - {{range .Locations}} - - {{if .PathType}} - - {{end}} - {{if .FileType}} - - {{end}} - - - - {{range $i, $loc := .Locations}} - {{if gt $i 0}} - - - - - {{end}} - {{end}} - {{end}} -
Path TypeFile TypePath IDHost
{{.PathType}}{{.FileType}}{{(index .Locations 0).StorageID}}{{range (index .Locations 0).Urls}}

{{.}}

{{end}}
{{$loc.StorageID}}{{range $loc.Urls}}

{{.}}

{{end}}
-
-
-

Tasks

- - - - - - - - {{range .Tasks}} - - - - - - - {{end}} -
Task TypeTask IDPostedWorker
{{.Name}}{{.ID}}{{.SincePosted}}{{if ne nil .OwnerID}}{{.Owner}}{{end}}
-
-{{end}} diff --git a/curiosrc/web/srv.go b/curiosrc/web/srv.go deleted file mode 100644 index b16a9f9afcb..00000000000 --- a/curiosrc/web/srv.go +++ /dev/null @@ -1,82 +0,0 @@ -// Package web defines the HTTP web server for static files and endpoints. -package web - -import ( - "context" - "embed" - "io" - "io/fs" - "net" - "net/http" - "os" - "path" - "strings" - "time" - - "github.com/gorilla/mux" - "go.opencensus.io/tag" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api" - "github.com/filecoin-project/lotus/curiosrc/web/hapi" - "github.com/filecoin-project/lotus/metrics" -) - -//go:embed static -var static embed.FS - -var basePath = "/static/" - -// An dev mode hack for no-restart changes to static and templates. -// You still need to recomplie the binary for changes to go code. -var webDev = os.Getenv("CURIO_WEB_DEV") == "1" - -func GetSrv(ctx context.Context, deps *deps.Deps) (*http.Server, error) { - mx := mux.NewRouter() - err := hapi.Routes(mx.PathPrefix("/hapi").Subrouter(), deps) - if err != nil { - return nil, err - } - api.Routes(mx.PathPrefix("/api").Subrouter(), deps) - - var static fs.FS = static - if webDev { - basePath = "" - static = os.DirFS("curiosrc/web/static") - } - - mx.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the request is for a directory, redirect to the index file. 
- if strings.HasSuffix(r.URL.Path, "/") { - r.URL.Path += "index.html" - } - - file, err := static.Open(path.Join(basePath, r.URL.Path)[1:]) - if err != nil { - w.WriteHeader(http.StatusNotFound) - _, _ = w.Write([]byte("404 Not Found")) - return - } - defer func() { _ = file.Close() }() - - fileInfo, err := file.Stat() - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte("500 Internal Server Error")) - return - } - - http.ServeContent(w, r, fileInfo.Name(), fileInfo.ModTime(), file.(io.ReadSeeker)) - }) - - return &http.Server{ - Handler: http.HandlerFunc(mx.ServeHTTP), - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "curio")) - return ctx - }, - Addr: deps.Cfg.Subsystems.GuiAddress, - ReadTimeout: time.Minute * 3, - ReadHeaderTimeout: time.Minute * 3, // lint - }, nil -} diff --git a/curiosrc/web/static/chain-connectivity.mjs b/curiosrc/web/static/chain-connectivity.mjs deleted file mode 100644 index bf4c80f04ce..00000000000 --- a/curiosrc/web/static/chain-connectivity.mjs +++ /dev/null @@ -1,59 +0,0 @@ -import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; -window.customElements.define('chain-connectivity', class MyElement extends LitElement { - constructor() { - super(); - this.data = []; - this.loadData(); - } - loadData() { - const eventSource = new EventSource('/api/debug/chain-state-sse'); - eventSource.onmessage = (event) => { - this.data = JSON.parse(event.data); - super.requestUpdate(); - }; - eventSource.onerror = (error) => { - console.error('Error:', error); - loadData(); - }; - }; - - static get styles() { - return [css` - :host { - box-sizing: border-box; /* Don't forgert this to include padding/border inside width calculation */ - } - .success { - color: green; - } - .warning { - color: yellow; - } - .error { - color: red; - } - `]; - } - render = () => html` - - - - - - - - - - - 
- - ${this.data.map(item => html` - - - - - - - `)} - -
RPC AddressReachabilitySync StatusVersion
${item.Address}${item.Reachable ? html`ok` : html`FAIL`}${item.SyncState === "ok" ? html`ok` : html`${item.SyncState}`}${item.Version}
` -}); diff --git a/curiosrc/web/static/config/edit.html b/curiosrc/web/static/config/edit.html deleted file mode 100644 index 2802316a980..00000000000 --- a/curiosrc/web/static/config/edit.html +++ /dev/null @@ -1,160 +0,0 @@ - - - - JSON Schema Editor - - - - - - - - - -
-
-
-
- -
- -
- - -
- -
- -
-
-
- - diff --git a/curiosrc/web/static/config/index.html b/curiosrc/web/static/config/index.html deleted file mode 100644 index 769929e53a3..00000000000 --- a/curiosrc/web/static/config/index.html +++ /dev/null @@ -1,97 +0,0 @@ - - - - - Configuration Editor - - - - - - - - -
-
-
-
-

Configuration Editor

-

Click on a layer to edit its configuration

- -
-
-
-
-
- - - \ No newline at end of file diff --git a/curiosrc/web/static/favicon.svg b/curiosrc/web/static/favicon.svg deleted file mode 100644 index 91f132959f2..00000000000 --- a/curiosrc/web/static/favicon.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/curiosrc/web/static/index.html b/curiosrc/web/static/index.html deleted file mode 100644 index 0d734d6b7cb..00000000000 --- a/curiosrc/web/static/index.html +++ /dev/null @@ -1,201 +0,0 @@ - - - - Curio Cluster Overview - - - - - - - -
-
-
-
-

Chain Connectivity

- -
-
-
-
-
- -
-
-
-

Cluster Machines

- - - - - - - - - - - -
HostIDLast ContactTasks (24h)
-
-
-
-
-
- -
-
-
-

PoRep Pipeline

- - - - - - - - - - - - - - - - -
AddressSDRTreesPrecommit MsgWait SeedPoRepCommit MsgDoneFailed
-
-
-
-
-
- -
-
-
-

Actor Summary

- - - - - - - - - - - - - - - -
AddressConfig Layers AvailableQaPDeadlinesBalanceAvailableWorkerWins
-
-
-
-
-
- - -
-
-
-

Recently Finished Tasks

- - - - - - - - - - - - - - - - -
NameIDExecutorPostedStartQueuedTookOutcomeMessage
-
-
-
-
-
- -
-
-
-

Cluster Tasks

- - - - - - - - - - - -
TaskIDPostedOwner
-
-
-
-
-
-
-
- - - \ No newline at end of file diff --git a/curiosrc/web/static/pipeline_porep.html b/curiosrc/web/static/pipeline_porep.html deleted file mode 100644 index c609aea6f69..00000000000 --- a/curiosrc/web/static/pipeline_porep.html +++ /dev/null @@ -1,98 +0,0 @@ - - - Lotus Provider PoRep Pipeline - - - - - - - - - -
-
-

Lotus Provider PoRep Pipeline

-
-
-
-
-
-
-
-

Sectors

- - - - - - - - - - -
Sector IDCreate TimeState
-
-
-
-
-
- - \ No newline at end of file diff --git a/curiosrc/web/static/sector/index.html b/curiosrc/web/static/sector/index.html deleted file mode 100644 index 9ac5559cd4a..00000000000 --- a/curiosrc/web/static/sector/index.html +++ /dev/null @@ -1,129 +0,0 @@ - - - - - Sector List - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- - - - -
Loading...
-
-
-
-
- - - - \ No newline at end of file diff --git a/curiosrc/web/static/ux/curio-ux.mjs b/curiosrc/web/static/ux/curio-ux.mjs deleted file mode 100644 index 0b883d1d777..00000000000 --- a/curiosrc/web/static/ux/curio-ux.mjs +++ /dev/null @@ -1,100 +0,0 @@ -import {LitElement, css, html} from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; - -//import 'https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.esm.js'; - - -class CurioUX extends LitElement { - static styles = css` -\ .curio-slot { - } - :host { - display: block; - margin: 2px 3px; - } - - `; - connectedCallback() { - super.connectedCallback(); - //"https://unpkg.com/@cds/core/global.min.css", - //"https://unpkg.com/@cds/city/css/bundles/default.min.css", - //"https://unpkg.com/@cds/core/styles/theme.dark.min.css", - //"https://unpkg.com/@clr/ui/clr-ui.min.css", - - document.head.innerHTML += ` - - - - - -` - - document.documentElement.lang = 'en'; - - // how Bootstrap & DataTables expect dark mode declared. - document.documentElement.classList.add('dark'); - - this.messsage = this.getCookieMessage(); - } - - render() { - return html` -
- - - ${this.message? html``: html``} - -
- - `; - } - - getCookieMessage() { - const name = 'message'; - const cookies = document.cookie.split(';'); - for (let i = 0; i < cookies.length; i++) { - const cookie = cookies[i].trim(); - if (cookie.startsWith(name + '=')) { - var val = cookie.substring(name.length + 1); - document.cookie = name + '=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;'; - return val; - } - } - return null; - } - -}; - -customElements.define('curio-ux', CurioUX); \ No newline at end of file diff --git a/curiosrc/web/static/ux/main.css b/curiosrc/web/static/ux/main.css deleted file mode 100644 index c73002ff6e1..00000000000 --- a/curiosrc/web/static/ux/main.css +++ /dev/null @@ -1,53 +0,0 @@ -@import url('https://fonts.cdnfonts.com/css/metropolis-2'); - -html { - min-height: 100vh; - background: rgb(11, 22, 34); - padding: 0; -} -body { - margin: 0; - padding: 3px 4px; - background: rgb(11, 22, 34); -} -curio-ux { - /* To resemble Clarity Design */ - color: rgb(227, 234, 237); - font-family: Metropolis, monospace; - font-weight: 400; - background: rgb(11, 22, 34); -} - - -.app-head { - width: 100%; -} -.head-left { - display: inline-block; -} -.head-right { - display: inline-block; - float: right; -} - -a { - text-decoration: none; -} - -a:link, a:visited { - color: #adf7ad; -} - -a:hover { - color: #88cc60; -} - -.success { - color: greenyellow; -} -.warning { - color: yellow; -} -.error { - color: red; -} diff --git a/curiosrc/window/compute_do.go b/curiosrc/window/compute_do.go deleted file mode 100644 index fcde14d82f6..00000000000 --- a/curiosrc/window/compute_do.go +++ /dev/null @@ -1,442 +0,0 @@ -package window - -import ( - "bytes" - "context" - "sort" - "sync" - "time" - - "github.com/ipfs/go-cid" - "go.uber.org/multierr" - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - 
"github.com/filecoin-project/go-state-types/builtin" - miner2 "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/proof" - proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -const disablePreChecks = false // todo config - -func (t *WdPostTask) DoPartition(ctx context.Context, ts *types.TipSet, maddr address.Address, di *dline.Info, partIdx uint64) (out *miner2.SubmitWindowedPoStParams, err error) { - defer func() { - if r := recover(); r != nil { - log.Errorf("recover: %s", r) - err = xerrors.Errorf("panic in doPartition: %s", r) - } - }() - - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err) - } - - headTs, err := t.api.ChainHead(ctx) - if err != nil { - return nil, xerrors.Errorf("getting current head: %w", err) - } - - rand, err := t.api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes(), headTs.Key()) - if err != nil { - return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err) - } - - parts, err := t.api.StateMinerPartitions(ctx, maddr, di.Index, ts.Key()) - if err != nil { - return nil, xerrors.Errorf("getting partitions: %w", err) - } - - if partIdx >= uint64(len(parts)) { - return nil, xerrors.Errorf("invalid partIdx %d (deadline has %d partitions)", partIdx, len(parts)) - } - - partition := parts[partIdx] - - params := miner2.SubmitWindowedPoStParams{ - 
Deadline: di.Index, - Partitions: make([]miner2.PoStPartition, 0, 1), - Proofs: nil, - } - - var partitions []miner2.PoStPartition - var xsinfos []proof7.ExtendedSectorInfo - - { - toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors) - if err != nil { - return nil, xerrors.Errorf("removing faults from set of sectors to prove: %w", err) - } - /*if manual { - // this is a check run, we want to prove faulty sectors, even - // if they are not declared as recovering. - toProve = partition.LiveSectors - }*/ - toProve, err = bitfield.MergeBitFields(toProve, partition.RecoveringSectors) - if err != nil { - return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err) - } - - good, err := toProve.Copy() - if err != nil { - return nil, xerrors.Errorf("copy toProve: %w", err) - } - if !disablePreChecks { - good, err = checkSectors(ctx, t.api, t.faultTracker, maddr, toProve, ts.Key()) - if err != nil { - return nil, xerrors.Errorf("checking sectors to skip: %w", err) - } - } - - /*good, err = bitfield.SubtractBitField(good, postSkipped) - if err != nil { - return nil, xerrors.Errorf("toProve - postSkipped: %w", err) - } - - post skipped is legacy retry mechanism, shouldn't be needed anymore - */ - - skipped, err := bitfield.SubtractBitField(toProve, good) - if err != nil { - return nil, xerrors.Errorf("toProve - good: %w", err) - } - - sc, err := skipped.Count() - if err != nil { - return nil, xerrors.Errorf("getting skipped sector count: %w", err) - } - - skipCount := sc - - ssi, err := t.sectorsForProof(ctx, maddr, good, partition.AllSectors, ts) - if err != nil { - return nil, xerrors.Errorf("getting sorted sector info: %w", err) - } - - if len(ssi) == 0 { - return nil, xerrors.Errorf("no sectors to prove") - } - - xsinfos = append(xsinfos, ssi...) 
- partitions = append(partitions, miner2.PoStPartition{ - Index: partIdx, - Skipped: skipped, - }) - - log.Infow("running window post", - "chain-random", rand, - "deadline", di, - "height", ts.Height(), - "skipped", skipCount) - - tsStart := build.Clock.Now() - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return nil, err - } - - nv, err := t.api.StateNetworkVersion(ctx, ts.Key()) - if err != nil { - return nil, xerrors.Errorf("getting network version: %w", err) - } - - ppt, err := xsinfos[0].SealProof.RegisteredWindowPoStProofByNetworkVersion(nv) - if err != nil { - return nil, xerrors.Errorf("failed to get window post type: %w", err) - } - - postOut, ps, err := t.generateWindowPoSt(ctx, ppt, abi.ActorID(mid), xsinfos, append(abi.PoStRandomness{}, rand...)) - elapsed := time.Since(tsStart) - log.Infow("computing window post", "partition", partIdx, "elapsed", elapsed, "skip", len(ps), "err", err) - if err != nil { - log.Errorf("error generating window post: %s", err) - } - - if err == nil { - // If we proved nothing, something is very wrong. - if len(postOut) == 0 { - log.Errorf("len(postOut) == 0") - return nil, xerrors.Errorf("received no proofs back from generate window post") - } - - headTs, err := t.api.ChainHead(ctx) - if err != nil { - return nil, xerrors.Errorf("getting current head: %w", err) - } - - checkRand, err := t.api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes(), headTs.Key()) - if err != nil { - return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err) - } - - if !bytes.Equal(checkRand, rand) { - // this is a check from legacy code, there it would retry with new randomness. - // here we don't retry because the current network version uses beacon randomness - // which should never change. We do keep this check tho to detect potential issues. 
- return nil, xerrors.Errorf("post generation randomness was different from random beacon") - } - - sinfos := make([]proof7.SectorInfo, len(xsinfos)) - for i, xsi := range xsinfos { - sinfos[i] = proof7.SectorInfo{ - SealProof: xsi.SealProof, - SectorNumber: xsi.SectorNumber, - SealedCID: xsi.SealedCID, - } - } - if correct, err := t.verifier.VerifyWindowPoSt(ctx, proof.WindowPoStVerifyInfo{ - Randomness: abi.PoStRandomness(checkRand), - Proofs: postOut, - ChallengedSectors: sinfos, - Prover: abi.ActorID(mid), - }); err != nil { // revive:disable-line:empty-block - /*log.Errorw("window post verification failed", "post", postOut, "error", err) - time.Sleep(5 * time.Second) - continue todo retry loop */ - } else if !correct { - _ = correct - /*log.Errorw("generated incorrect window post proof", "post", postOut, "error", err) - continue todo retry loop*/ - } - - // Proof generation successful, stop retrying - //somethingToProve = true - params.Partitions = partitions - params.Proofs = postOut - //break - - return ¶ms, nil - } - } - - return nil, xerrors.Errorf("failed to generate window post") -} - -type CheckSectorsAPI interface { - StateMinerSectors(ctx context.Context, addr address.Address, bf *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) -} - -func checkSectors(ctx context.Context, api CheckSectorsAPI, ft sealer.FaultTracker, - maddr address.Address, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) { - mid, err := address.IDFromAddress(maddr) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to convert to ID addr: %w", err) - } - - sectorInfos, err := api.StateMinerSectors(ctx, maddr, &check, tsk) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to get sector infos: %w", err) - } - - type checkSector struct { - sealed cid.Cid - update bool - } - - sectors := make(map[abi.SectorNumber]checkSector) - var tocheck []storiface.SectorRef - for _, info := range sectorInfos { 
- sectors[info.SectorNumber] = checkSector{ - sealed: info.SealedCID, - update: info.SectorKeyCID != nil, - } - tocheck = append(tocheck, storiface.SectorRef{ - ProofType: info.SealProof, - ID: abi.SectorID{ - Miner: abi.ActorID(mid), - Number: info.SectorNumber, - }, - }) - } - - if len(tocheck) == 0 { - return bitfield.BitField{}, nil - } - - pp, err := tocheck[0].ProofType.RegisteredWindowPoStProof() - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to get window PoSt proof: %w", err) - } - pp, err = pp.ToV1_1PostProof() - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to convert to v1_1 post proof: %w", err) - } - - bad, err := ft.CheckProvable(ctx, pp, tocheck, func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) { - s, ok := sectors[id.Number] - if !ok { - return cid.Undef, false, xerrors.Errorf("sealed CID not found") - } - return s.sealed, s.update, nil - }) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err) - } - for id := range bad { - delete(sectors, id.Number) - } - - log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors)) - - sbf := bitfield.New() - for s := range sectors { - sbf.Set(uint64(s)) - } - - return sbf, nil -} - -func (t *WdPostTask) sectorsForProof(ctx context.Context, maddr address.Address, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof7.ExtendedSectorInfo, error) { - sset, err := t.api.StateMinerSectors(ctx, maddr, &goodSectors, ts.Key()) - if err != nil { - return nil, err - } - - if len(sset) == 0 { - return nil, nil - } - - sectorByID := make(map[uint64]proof7.ExtendedSectorInfo, len(sset)) - for _, sector := range sset { - sectorByID[uint64(sector.SectorNumber)] = proof7.ExtendedSectorInfo{ - SectorNumber: sector.SectorNumber, - SealedCID: sector.SealedCID, - SealProof: sector.SealProof, - SectorKey: sector.SectorKeyCID, - } - } - - proofSectors := make([]proof7.ExtendedSectorInfo, 0, 
len(sset)) - if err := allSectors.ForEach(func(sectorNo uint64) error { - if info, found := sectorByID[sectorNo]; found { - proofSectors = append(proofSectors, info) - } //else { - //skip - // todo: testing: old logic used to put 'substitute' sectors here - // that probably isn't needed post nv19, but we do need to check that - //} - return nil - }); err != nil { - return nil, xerrors.Errorf("iterating partition sector bitmap: %w", err) - } - - return proofSectors, nil -} - -func (t *WdPostTask) generateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { - var retErr error - randomness[31] &= 0x3f - - out := make([]proof.PoStProof, 0) - - if len(sectorInfo) == 0 { - return nil, nil, xerrors.New("generate window post len(sectorInfo)=0") - } - - maxPartitionSize, err := builtin.PoStProofWindowPoStPartitionSectors(ppt) // todo proxy through chain/actors - if err != nil { - return nil, nil, xerrors.Errorf("get sectors count of partition failed:%+v", err) - } - - // The partitions number of this batch - // ceil(sectorInfos / maxPartitionSize) - partitionCount := uint64((len(sectorInfo) + int(maxPartitionSize) - 1) / int(maxPartitionSize)) - if partitionCount > 1 { - return nil, nil, xerrors.Errorf("generateWindowPoSt partitionCount:%d, only support 1", partitionCount) - } - - log.Infof("generateWindowPoSt maxPartitionSize:%d partitionCount:%d", maxPartitionSize, partitionCount) - - var skipped []abi.SectorID - var flk sync.Mutex - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - sort.Slice(sectorInfo, func(i, j int) bool { - return sectorInfo[i].SectorNumber < sectorInfo[j].SectorNumber - }) - - sectorNums := make([]abi.SectorNumber, len(sectorInfo)) - sectorMap := make(map[abi.SectorNumber]proof.ExtendedSectorInfo) - for i, s := range sectorInfo { - sectorNums[i] = s.SectorNumber - sectorMap[s.SectorNumber] = s - } 
- - postChallenges, err := ffi.GeneratePoStFallbackSectorChallenges(ppt, minerID, randomness, sectorNums) - if err != nil { - return nil, nil, xerrors.Errorf("generating fallback challenges: %v", err) - } - - proofList := make([]ffi.PartitionProof, partitionCount) - var wg sync.WaitGroup - wg.Add(int(partitionCount)) - - for partIdx := uint64(0); partIdx < partitionCount; partIdx++ { - go func(partIdx uint64) { - defer wg.Done() - - sectors := make([]storiface.PostSectorChallenge, 0) - for i := uint64(0); i < maxPartitionSize; i++ { - si := i + partIdx*maxPartitionSize - if si >= uint64(len(postChallenges.Sectors)) { - break - } - - snum := postChallenges.Sectors[si] - sinfo := sectorMap[snum] - - sectors = append(sectors, storiface.PostSectorChallenge{ - SealProof: sinfo.SealProof, - SectorNumber: snum, - SealedCID: sinfo.SealedCID, - Challenge: postChallenges.Challenges[snum], - Update: sinfo.SectorKey != nil, - }) - } - - pr, err := t.prover.GenerateWindowPoStAdv(cctx, ppt, minerID, sectors, int(partIdx), randomness, true) - sk := pr.Skipped - - if err != nil || len(sk) > 0 { - log.Errorf("generateWindowPost part:%d, skipped:%d, sectors: %d, err: %+v", partIdx, len(sk), len(sectors), err) - flk.Lock() - skipped = append(skipped, sk...) 
- - if err != nil { - retErr = multierr.Append(retErr, xerrors.Errorf("partitionIndex:%d err:%+v", partIdx, err)) - } - flk.Unlock() - } - - proofList[partIdx] = ffi.PartitionProof(pr.PoStProofs) - }(partIdx) - } - - wg.Wait() - - if len(skipped) > 0 { - log.Warnw("generateWindowPoSt skipped sectors", "skipped", len(skipped)) - } - - postProofs, err := ffi.MergeWindowPoStPartitionProofs(ppt, proofList) - if err != nil { - return nil, skipped, xerrors.Errorf("merge windowPoSt partition proofs: %v", err) - } - - out = append(out, *postProofs) - return out, skipped, retErr -} diff --git a/curiosrc/window/compute_task.go b/curiosrc/window/compute_task.go deleted file mode 100644 index 541a2d5e2c1..00000000000 --- a/curiosrc/window/compute_task.go +++ /dev/null @@ -1,443 +0,0 @@ -package window - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "sort" - "strings" - - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/harmony/taskhelp" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/sealtasks" - 
"github.com/filecoin-project/lotus/storage/sealer/storiface" - "github.com/filecoin-project/lotus/storage/wdpost" -) - -var log = logging.Logger("curio/window") - -var EpochsPerDeadline = miner.WPoStProvingPeriod() / abi.ChainEpoch(miner.WPoStPeriodDeadlines) - -type WdPostTaskDetails struct { - Ts *types.TipSet - Deadline *dline.Info -} - -type WDPoStAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) - StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) - StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) -} - -type ProverPoSt interface { - GenerateWindowPoStAdv(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness, allowSkip bool) (storiface.WindowPoStResult, error) -} - -type WdPostTask struct { - api WDPoStAPI - db *harmonydb.DB - - faultTracker sealer.FaultTracker - prover ProverPoSt - verifier storiface.Verifier - - windowPoStTF promise.Promise[harmonytask.AddTaskFunc] - - actors map[dtypes.MinerAddress]bool - max int -} - -type wdTaskIdentity struct { - SpID uint64 `db:"sp_id"` - ProvingPeriodStart abi.ChainEpoch `db:"proving_period_start"` - DeadlineIndex uint64 `db:"deadline_index"` - 
PartitionIndex uint64 `db:"partition_index"` -} - -func NewWdPostTask(db *harmonydb.DB, - api WDPoStAPI, - faultTracker sealer.FaultTracker, - prover ProverPoSt, - verifier storiface.Verifier, - pcs *chainsched.CurioChainSched, - actors map[dtypes.MinerAddress]bool, - max int, -) (*WdPostTask, error) { - t := &WdPostTask{ - db: db, - api: api, - - faultTracker: faultTracker, - prover: prover, - verifier: verifier, - - actors: actors, - max: max, - } - - if pcs != nil { - if err := pcs.AddHandler(t.processHeadChange); err != nil { - return nil, err - } - } - - return t, nil -} - -func (t *WdPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - log.Debugw("WdPostTask.Do()", "taskID", taskID) - - var spID, pps, dlIdx, partIdx uint64 - - err = t.db.QueryRow(context.Background(), - `Select sp_id, proving_period_start, deadline_index, partition_index - from wdpost_partition_tasks - where task_id = $1`, taskID).Scan( - &spID, &pps, &dlIdx, &partIdx, - ) - if err != nil { - log.Errorf("WdPostTask.Do() failed to queryRow: %v", err) - return false, err - } - - head, err := t.api.ChainHead(context.Background()) - if err != nil { - log.Errorf("WdPostTask.Do() failed to get chain head: %v", err) - return false, err - } - - deadline := wdpost.NewDeadlineInfo(abi.ChainEpoch(pps), dlIdx, head.Height()) - - var testTask *int - isTestTask := func() bool { - if testTask != nil { - return *testTask > 0 - } - - testTask = new(int) - err := t.db.QueryRow(context.Background(), `SELECT COUNT(*) FROM harmony_test WHERE task_id = $1`, taskID).Scan(testTask) - if err != nil { - log.Errorf("WdPostTask.Do() failed to queryRow: %v", err) - return false - } - - return *testTask > 0 - } - - if deadline.PeriodElapsed() && !isTestTask() { - log.Errorf("WdPost removed stale task: %v %v", taskID, deadline) - return true, nil - } - - if deadline.Challenge > head.Height() { - if isTestTask() { - deadline = 
wdpost.NewDeadlineInfo(abi.ChainEpoch(pps)-deadline.WPoStProvingPeriod, dlIdx, head.Height()-deadline.WPoStProvingPeriod) - log.Warnw("Test task is in the future, adjusting to past", "taskID", taskID, "deadline", deadline) - } - } - - maddr, err := address.NewIDAddress(spID) - if err != nil { - log.Errorf("WdPostTask.Do() failed to NewIDAddress: %v", err) - return false, err - } - - ts, err := t.api.ChainGetTipSetAfterHeight(context.Background(), deadline.Challenge, head.Key()) - if err != nil { - log.Errorf("WdPostTask.Do() failed to ChainGetTipSetAfterHeight: %v", err) - return false, err - } - - postOut, err := t.DoPartition(context.Background(), ts, maddr, deadline, partIdx) - if err != nil { - log.Errorf("WdPostTask.Do() failed to doPartition: %v", err) - return false, err - } - - var msgbuf bytes.Buffer - if err := postOut.MarshalCBOR(&msgbuf); err != nil { - return false, xerrors.Errorf("marshaling PoSt: %w", err) - } - - if isTestTask() { - // Do not send test tasks to the chain but to harmony_test & stdout. - - data, err := json.MarshalIndent(map[string]any{ - "sp_id": spID, - "proving_period_start": pps, - "deadline": deadline.Index, - "partition": partIdx, - "submit_at_epoch": deadline.Open, - "submit_by_epoch": deadline.Close, - "proof_params": msgbuf.Bytes(), - }, "", " ") - if err != nil { - return false, xerrors.Errorf("marshaling message: %w", err) - } - ctx := context.Background() - _, err = t.db.Exec(ctx, `UPDATE harmony_test SET result=$1 WHERE task_id=$2`, string(data), taskID) - if err != nil { - return false, xerrors.Errorf("updating harmony_test: %w", err) - } - log.Infof("SKIPPED sending test message to chain. 
SELECT * FROM harmony_test WHERE task_id= %v", taskID) - return true, nil // nothing committed - } - // Insert into wdpost_proofs table - n, err := t.db.Exec(context.Background(), - `INSERT INTO wdpost_proofs ( - sp_id, - proving_period_start, - deadline, - partition, - submit_at_epoch, - submit_by_epoch, - proof_params) - VALUES ($1, $2, $3, $4, $5, $6, $7)`, - spID, - pps, - deadline.Index, - partIdx, - deadline.Open, - deadline.Close, - msgbuf.Bytes(), - ) - - if err != nil { - log.Errorf("WdPostTask.Do() failed to insert into wdpost_proofs: %v", err) - return false, err - } - if n != 1 { - log.Errorf("WdPostTask.Do() failed to insert into wdpost_proofs: %v", err) - return false, err - } - - return true, nil -} - -func entToStr[T any](t T, i int) string { - return fmt.Sprint(t) -} - -func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - // GetEpoch - ts, err := t.api.ChainHead(context.Background()) - - if err != nil { - return nil, err - } - - // GetData for tasks - type wdTaskDef struct { - TaskID harmonytask.TaskID - SpID uint64 - ProvingPeriodStart abi.ChainEpoch - DeadlineIndex uint64 - PartitionIndex uint64 - - dlInfo *dline.Info `pgx:"-"` - } - var tasks []wdTaskDef - - err = t.db.Select(context.Background(), &tasks, - `Select - task_id, - sp_id, - proving_period_start, - deadline_index, - partition_index - from wdpost_partition_tasks - where task_id IN (SELECT unnest(string_to_array($1, ','))::bigint)`, strings.Join(lo.Map(ids, entToStr[harmonytask.TaskID]), ",")) - if err != nil { - return nil, err - } - - // Accept those past deadline, then delete them in Do(). 
- for i := range tasks { - tasks[i].dlInfo = wdpost.NewDeadlineInfo(tasks[i].ProvingPeriodStart, tasks[i].DeadlineIndex, ts.Height()) - - if tasks[i].dlInfo.PeriodElapsed() { - // note: Those may be test tasks - return &tasks[i].TaskID, nil - } - } - - // todo fix the block below - // workAdderMutex is held by taskTypeHandler.considerWork, which calls this CanAccept - // te.ResourcesAvailable will try to get that lock again, which will deadlock - - // Discard those too big for our free RAM - /*freeRAM := te.ResourcesAvailable().Ram - tasks = lo.Filter(tasks, func(d wdTaskDef, _ int) bool { - maddr, err := address.NewIDAddress(tasks[0].Sp_id) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to NewIDAddress: %v", err) - return false - } - - mi, err := t.api.StateMinerInfo(context.Background(), maddr, ts.Key()) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to StateMinerInfo: %v", err) - return false - } - - spt, err := policy.GetSealProofFromPoStProof(mi.WindowPoStProofType) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to GetSealProofFromPoStProof: %v", err) - return false - } - - return res[spt].MaxMemory <= freeRAM - })*/ - if len(tasks) == 0 { - log.Infof("RAM too small for any WDPost task") - return nil, nil - } - - // Ignore those with too many failures unless they are the only ones left. 
- tasks, _ = taskhelp.SliceIfFound(tasks, func(d wdTaskDef) bool { - var r int - err := t.db.QueryRow(context.Background(), `SELECT COUNT(*) - FROM harmony_task_history - WHERE task_id = $1 AND result = false`, d.TaskID).Scan(&r) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to queryRow: %v", err) - } - return r < 2 - }) - - // Select the one closest to the deadline - sort.Slice(tasks, func(i, j int) bool { - return tasks[i].dlInfo.Open < tasks[j].dlInfo.Open - }) - - return &tasks[0].TaskID, nil -} - -var res = storiface.ResourceTable[sealtasks.TTGenerateWindowPoSt] - -func (t *WdPostTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Name: "WdPost", - Max: t.max, - MaxFailures: 3, - Follows: nil, - Cost: resources.Resources{ - Cpu: 1, - - // todo set to something for 32/64G sector sizes? Technically windowPoSt is happy on a CPU - // but it will use a GPU if available - Gpu: 0, - - // RAM of smallest proof's max is listed here - Ram: lo.Reduce(lo.Keys(res), func(i uint64, k abi.RegisteredSealProof, _ int) uint64 { - if res[k].MaxMemory < i { - return res[k].MaxMemory - } - return i - }, 1<<63), - }, - } -} - -func (t *WdPostTask) Adder(taskFunc harmonytask.AddTaskFunc) { - t.windowPoStTF.Set(taskFunc) -} - -func (t *WdPostTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { - for act := range t.actors { - maddr := address.Address(act) - - aid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner ID: %w", err) - } - - di, err := t.api.StateMinerProvingDeadline(ctx, maddr, apply.Key()) - if err != nil { - return err - } - - if !di.PeriodStarted() { - return nil // not proving anything yet - } - - partitions, err := t.api.StateMinerPartitions(ctx, maddr, di.Index, apply.Key()) - if err != nil { - return xerrors.Errorf("getting partitions: %w", err) - } - - // TODO: Batch Partitions?? 
- - for pidx := range partitions { - tid := wdTaskIdentity{ - SpID: aid, - ProvingPeriodStart: di.PeriodStart, - DeadlineIndex: di.Index, - PartitionIndex: uint64(pidx), - } - - tf := t.windowPoStTF.Val(ctx) - if tf == nil { - return xerrors.Errorf("no task func") - } - - tf(func(id harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) { - return t.addTaskToDB(id, tid, tx) - }) - } - } - - return nil -} - -func (t *WdPostTask) addTaskToDB(taskId harmonytask.TaskID, taskIdent wdTaskIdentity, tx *harmonydb.Tx) (bool, error) { - - _, err := tx.Exec( - `INSERT INTO wdpost_partition_tasks ( - task_id, - sp_id, - proving_period_start, - deadline_index, - partition_index - ) VALUES ($1, $2, $3, $4, $5)`, - taskId, - taskIdent.SpID, - taskIdent.ProvingPeriodStart, - taskIdent.DeadlineIndex, - taskIdent.PartitionIndex, - ) - if err != nil { - return false, xerrors.Errorf("insert partition task: %w", err) - } - - return true, nil -} - -var _ harmonytask.TaskInterface = &WdPostTask{} diff --git a/curiosrc/window/faults_simple.go b/curiosrc/window/faults_simple.go deleted file mode 100644 index 64f5e86506c..00000000000 --- a/curiosrc/window/faults_simple.go +++ /dev/null @@ -1,152 +0,0 @@ -package window - -import ( - "context" - "crypto/rand" - "fmt" - "sync" - "time" - - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type SimpleFaultTracker struct { - storage paths.Store - index paths.SectorIndex - - parallelCheckLimit int // todo live config? 
- singleCheckTimeout time.Duration - partitionCheckTimeout time.Duration -} - -func NewSimpleFaultTracker(storage paths.Store, index paths.SectorIndex, - parallelCheckLimit int, singleCheckTimeout time.Duration, partitionCheckTimeout time.Duration) *SimpleFaultTracker { - return &SimpleFaultTracker{ - storage: storage, - index: index, - - parallelCheckLimit: parallelCheckLimit, - singleCheckTimeout: singleCheckTimeout, - partitionCheckTimeout: partitionCheckTimeout, - } -} - -func (m *SimpleFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - if rg == nil { - return nil, xerrors.Errorf("rg is nil") - } - - var bad = make(map[abi.SectorID]string) - var badLk sync.Mutex - - var postRand abi.PoStRandomness = make([]byte, abi.RandomnessLength) - _, _ = rand.Read(postRand) - postRand[31] &= 0x3f - - limit := m.parallelCheckLimit - if limit <= 0 { - limit = len(sectors) - } - throttle := make(chan struct{}, limit) - - addBad := func(s abi.SectorID, reason string) { - badLk.Lock() - bad[s] = reason - badLk.Unlock() - } - - if m.partitionCheckTimeout > 0 { - var cancel2 context.CancelFunc - ctx, cancel2 = context.WithTimeout(ctx, m.partitionCheckTimeout) - defer cancel2() - } - - var wg sync.WaitGroup - wg.Add(len(sectors)) - - for _, sector := range sectors { - select { - case throttle <- struct{}{}: - case <-ctx.Done(): - addBad(sector.ID, fmt.Sprintf("waiting for check worker: %s", ctx.Err())) - wg.Done() - continue - } - - go func(sector storiface.SectorRef) { - defer wg.Done() - defer func() { - <-throttle - }() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - commr, update, err := rg(ctx, sector.ID) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", "err", err) - addBad(sector.ID, fmt.Sprintf("getting commR: %s", err)) - return - } 
- - toLock := storiface.FTSealed | storiface.FTCache - if update { - toLock = storiface.FTUpdate | storiface.FTUpdateCache - } - - locked, err := m.index.StorageTryLock(ctx, sector.ID, toLock, storiface.FTNone) - if err != nil { - addBad(sector.ID, fmt.Sprintf("tryLock error: %s", err)) - return - } - - if !locked { - log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector) - addBad(sector.ID, "can't acquire read lock") - return - } - - ch, err := ffi.GeneratePoStFallbackSectorChallenges(pp, sector.ID.Miner, postRand, []abi.SectorNumber{ - sector.ID.Number, - }) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "err", err) - addBad(sector.ID, fmt.Sprintf("generating fallback challenges: %s", err)) - return - } - - vctx := ctx - - if m.singleCheckTimeout > 0 { - var cancel2 context.CancelFunc - vctx, cancel2 = context.WithTimeout(ctx, m.singleCheckTimeout) - defer cancel2() - } - - _, err = m.storage.GenerateSingleVanillaProof(vctx, sector.ID.Miner, storiface.PostSectorChallenge{ - SealProof: sector.ProofType, - SectorNumber: sector.ID.Number, - SealedCID: commr, - Challenge: ch.Challenges[sector.ID.Number], - Update: update, - }, pp) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "err", err) - addBad(sector.ID, fmt.Sprintf("generating vanilla proof: %s", err)) - return - } - }(sector) - } - - wg.Wait() - - return bad, nil -} diff --git a/curiosrc/window/recover_task.go b/curiosrc/window/recover_task.go deleted file mode 100644 index 1ed110978c1..00000000000 --- a/curiosrc/window/recover_task.go +++ /dev/null @@ -1,324 +0,0 @@ -package window - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - 
"github.com/filecoin-project/go-state-types/dline" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/wdpost" -) - -type WdPostRecoverDeclareTask struct { - sender *message.Sender - db *harmonydb.DB - api WdPostRecoverDeclareTaskApi - faultTracker sealer.FaultTracker - - maxDeclareRecoveriesGasFee types.FIL - as *multictladdr.MultiAddressSelector - actors map[dtypes.MinerAddress]bool - - startCheckTF promise.Promise[harmonytask.AddTaskFunc] -} - -type WdPostRecoverDeclareTaskApi interface { - ChainHead(context.Context) (*types.TipSet, error) - StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) - StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateMinerSectors(ctx context.Context, addr address.Address, bf *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) - - GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) - GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) - GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender 
address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) - - WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletHas(context.Context, address.Address) (bool, error) - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) -} - -func NewWdPostRecoverDeclareTask(sender *message.Sender, - db *harmonydb.DB, - api WdPostRecoverDeclareTaskApi, - faultTracker sealer.FaultTracker, - as *multictladdr.MultiAddressSelector, - pcs *chainsched.CurioChainSched, - - maxDeclareRecoveriesGasFee types.FIL, - actors map[dtypes.MinerAddress]bool) (*WdPostRecoverDeclareTask, error) { - t := &WdPostRecoverDeclareTask{ - sender: sender, - db: db, - api: api, - faultTracker: faultTracker, - - maxDeclareRecoveriesGasFee: maxDeclareRecoveriesGasFee, - as: as, - actors: actors, - } - - if pcs != nil { - if err := pcs.AddHandler(t.processHeadChange); err != nil { - return nil, err - } - } - - return t, nil -} - -func (w *WdPostRecoverDeclareTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - log.Debugw("WdPostRecoverDeclareTask.Do()", "taskID", taskID) - ctx := context.Background() - - var spID, pps, dlIdx, partIdx uint64 - - err = w.db.QueryRow(context.Background(), - `Select sp_id, proving_period_start, deadline_index, partition_index - from wdpost_recovery_tasks - where task_id = $1`, taskID).Scan( - &spID, &pps, &dlIdx, &partIdx, - ) - if err != nil { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to queryRow: %v", err) - return false, err - } - - head, err := w.api.ChainHead(context.Background()) - if err != nil { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to get chain head: %v", err) - return false, err - } - - deadline := wdpost.NewDeadlineInfo(abi.ChainEpoch(pps), dlIdx, head.Height()) - - if deadline.FaultCutoffPassed() { - log.Errorf("WdPostRecover removed stale 
task: %v %v", taskID, deadline) - return true, nil - } - - maddr, err := address.NewIDAddress(spID) - if err != nil { - log.Errorf("WdPostTask.Do() failed to NewIDAddress: %v", err) - return false, err - } - - partitions, err := w.api.StateMinerPartitions(context.Background(), maddr, dlIdx, head.Key()) - if err != nil { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to get partitions: %v", err) - return false, err - } - - if partIdx >= uint64(len(partitions)) { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to get partitions: partIdx >= len(partitions)") - return false, err - } - - partition := partitions[partIdx] - - unrecovered, err := bitfield.SubtractBitField(partition.FaultySectors, partition.RecoveringSectors) - if err != nil { - return false, xerrors.Errorf("subtracting recovered set from fault set: %w", err) - } - - uc, err := unrecovered.Count() - if err != nil { - return false, xerrors.Errorf("counting unrecovered sectors: %w", err) - } - - if uc == 0 { - log.Warnw("nothing to declare recovered", "maddr", maddr, "deadline", deadline, "partition", partIdx) - return true, nil - } - - recovered, err := checkSectors(ctx, w.api, w.faultTracker, maddr, unrecovered, head.Key()) - if err != nil { - return false, xerrors.Errorf("checking unrecovered sectors: %w", err) - } - - // if all sectors failed to recover, don't declare recoveries - recoveredCount, err := recovered.Count() - if err != nil { - return false, xerrors.Errorf("counting recovered sectors: %w", err) - } - - if recoveredCount == 0 { - log.Warnw("no sectors recovered", "maddr", maddr, "deadline", deadline, "partition", partIdx) - return true, nil - } - - recDecl := miner.RecoveryDeclaration{ - Deadline: dlIdx, - Partition: partIdx, - Sectors: recovered, - } - - params := &miner.DeclareFaultsRecoveredParams{ - Recoveries: []miner.RecoveryDeclaration{recDecl}, - } - - enc, aerr := actors.SerializeParams(params) - if aerr != nil { - return false, xerrors.Errorf("could not serialize declare 
recoveries parameters: %w", aerr) - } - - msg := &types.Message{ - To: maddr, - Method: builtin.MethodsMiner.DeclareFaultsRecovered, - Params: enc, - Value: types.NewInt(0), - } - - msg, mss, err := preparePoStMessage(w.api, w.as, maddr, msg, abi.TokenAmount(w.maxDeclareRecoveriesGasFee)) - if err != nil { - return false, xerrors.Errorf("sending declare recoveries message: %w", err) - } - - mc, err := w.sender.Send(ctx, msg, mss, "declare-recoveries") - if err != nil { - return false, xerrors.Errorf("sending declare recoveries message: %w", err) - } - - log.Debugw("WdPostRecoverDeclareTask.Do() sent declare recoveries message", "maddr", maddr, "deadline", deadline, "partition", partIdx, "mc", mc) - return true, nil -} - -func (w *WdPostRecoverDeclareTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - if w.sender == nil { - // we can't send messages - return nil, nil - } - - return &ids[0], nil -} - -func (w *WdPostRecoverDeclareTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 128, - Name: "WdPostRecover", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 128 << 20, - }, - MaxFailures: 10, - Follows: nil, - } -} - -func (w *WdPostRecoverDeclareTask) Adder(taskFunc harmonytask.AddTaskFunc) { - w.startCheckTF.Set(taskFunc) -} - -func (w *WdPostRecoverDeclareTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { - tf := w.startCheckTF.Val(ctx) - - for act := range w.actors { - maddr := address.Address(act) - - aid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner ID: %w", err) - } - - di, err := w.api.StateMinerProvingDeadline(ctx, maddr, apply.Key()) - if err != nil { - return err - } - - if !di.PeriodStarted() { - return nil // not proving anything yet - } - - // declaring two deadlines ahead - 
declDeadline := (di.Index + 2) % di.WPoStPeriodDeadlines - - pps := di.PeriodStart - if declDeadline != di.Index+2 { - pps = di.NextPeriodStart() - } - - partitions, err := w.api.StateMinerPartitions(ctx, maddr, declDeadline, apply.Key()) - if err != nil { - return xerrors.Errorf("getting partitions: %w", err) - } - - for pidx, partition := range partitions { - unrecovered, err := bitfield.SubtractBitField(partition.FaultySectors, partition.RecoveringSectors) - if err != nil { - return xerrors.Errorf("subtracting recovered set from fault set: %w", err) - } - - uc, err := unrecovered.Count() - if err != nil { - return xerrors.Errorf("counting unrecovered sectors: %w", err) - } - - if uc == 0 { - log.Debugw("WdPostRecoverDeclareTask.processHeadChange() uc == 0, skipping", "maddr", maddr, "declDeadline", declDeadline, "pidx", pidx) - continue - } - - tid := wdTaskIdentity{ - SpID: aid, - ProvingPeriodStart: pps, - DeadlineIndex: declDeadline, - PartitionIndex: uint64(pidx), - } - - tf(func(id harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) { - return w.addTaskToDB(id, tid, tx) - }) - } - } - - return nil -} - -func (w *WdPostRecoverDeclareTask) addTaskToDB(taskId harmonytask.TaskID, taskIdent wdTaskIdentity, tx *harmonydb.Tx) (bool, error) { - _, err := tx.Exec( - `INSERT INTO wdpost_recovery_tasks ( - task_id, - sp_id, - proving_period_start, - deadline_index, - partition_index - ) VALUES ($1, $2, $3, $4, $5)`, - taskId, - taskIdent.SpID, - taskIdent.ProvingPeriodStart, - taskIdent.DeadlineIndex, - taskIdent.PartitionIndex, - ) - if err != nil { - return false, xerrors.Errorf("insert partition task: %w", err) - } - - return true, nil -} - -var _ harmonytask.TaskInterface = &WdPostRecoverDeclareTask{} diff --git a/curiosrc/window/submit_task.go b/curiosrc/window/submit_task.go deleted file mode 100644 index 330fd050902..00000000000 --- a/curiosrc/window/submit_task.go +++ /dev/null @@ -1,307 +0,0 @@ -package window - -import ( - "bytes" - "context" - - 
"golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/storage/wdpost" -) - -type WdPoStSubmitTaskApi interface { - ChainHead(context.Context) (*types.TipSet, error) - - WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletHas(context.Context, address.Address) (bool, error) - - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - - GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) - GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) - GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) -} - -type WdPostSubmitTask struct { - sender 
*message.Sender - db *harmonydb.DB - api WdPoStSubmitTaskApi - - maxWindowPoStGasFee types.FIL - as *multictladdr.MultiAddressSelector - - submitPoStTF promise.Promise[harmonytask.AddTaskFunc] -} - -func NewWdPostSubmitTask(pcs *chainsched.CurioChainSched, send *message.Sender, db *harmonydb.DB, api WdPoStSubmitTaskApi, maxWindowPoStGasFee types.FIL, as *multictladdr.MultiAddressSelector) (*WdPostSubmitTask, error) { - res := &WdPostSubmitTask{ - sender: send, - db: db, - api: api, - - maxWindowPoStGasFee: maxWindowPoStGasFee, - as: as, - } - - if pcs != nil { - if err := pcs.AddHandler(res.processHeadChange); err != nil { - return nil, err - } - } - - return res, nil -} - -func (w *WdPostSubmitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - log.Debugw("WdPostSubmitTask.Do", "taskID", taskID) - - var spID uint64 - var deadline uint64 - var partition uint64 - var pps, submitAtEpoch, submitByEpoch abi.ChainEpoch - var earlyParamBytes []byte - var dbTask uint64 - - err = w.db.QueryRow( - context.Background(), `SELECT sp_id, proving_period_start, deadline, partition, submit_at_epoch, submit_by_epoch, proof_params, submit_task_id - FROM wdpost_proofs WHERE submit_task_id = $1`, taskID, - ).Scan(&spID, &pps, &deadline, &partition, &submitAtEpoch, &submitByEpoch, &earlyParamBytes, &dbTask) - if err != nil { - return false, xerrors.Errorf("query post proof: %w", err) - } - - if dbTask != uint64(taskID) { - return false, xerrors.Errorf("taskID mismatch: %d != %d", dbTask, taskID) - } - - head, err := w.api.ChainHead(context.Background()) - if err != nil { - return false, xerrors.Errorf("getting chain head: %w", err) - } - - if head.Height() > submitByEpoch { - // we missed the deadline, no point in submitting - log.Errorw("missed submit deadline", "spID", spID, "deadline", deadline, "partition", partition, "submitByEpoch", submitByEpoch, "headHeight", head.Height()) - return true, nil - } - - if head.Height() < submitAtEpoch { - 
log.Errorw("submit epoch not reached", "spID", spID, "deadline", deadline, "partition", partition, "submitAtEpoch", submitAtEpoch, "headHeight", head.Height()) - return false, xerrors.Errorf("submit epoch not reached: %d < %d", head.Height(), submitAtEpoch) - } - - dlInfo := wdpost.NewDeadlineInfo(pps, deadline, head.Height()) - - var params miner.SubmitWindowedPoStParams - if err := params.UnmarshalCBOR(bytes.NewReader(earlyParamBytes)); err != nil { - return false, xerrors.Errorf("unmarshaling proof message: %w", err) - } - - commEpoch := dlInfo.Challenge - - commRand, err := w.api.StateGetRandomnessFromTickets(context.Background(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil, head.Key()) - if err != nil { - err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (epoch=%d): %w", commEpoch, err) - log.Errorf("submitPoStMessage failed: %+v", err) - - return false, xerrors.Errorf("getting post commit randomness: %w", err) - } - - params.ChainCommitEpoch = commEpoch - params.ChainCommitRand = commRand - - var pbuf bytes.Buffer - if err := params.MarshalCBOR(&pbuf); err != nil { - return false, xerrors.Errorf("marshaling proof message: %w", err) - } - - maddr, err := address.NewIDAddress(spID) - if err != nil { - return false, xerrors.Errorf("invalid miner address: %w", err) - } - - msg := &types.Message{ - To: maddr, - Method: builtin.MethodsMiner.SubmitWindowedPoSt, - Params: pbuf.Bytes(), - Value: big.Zero(), - } - - msg, mss, err := preparePoStMessage(w.api, w.as, maddr, msg, abi.TokenAmount(w.maxWindowPoStGasFee)) - if err != nil { - return false, xerrors.Errorf("preparing proof message: %w", err) - } - - ctx := context.Background() - smsg, err := w.sender.Send(ctx, msg, mss, "wdpost") - if err != nil { - return false, xerrors.Errorf("sending proof message: %w", err) - } - - // set message_cid in the wdpost_proofs entry - - _, err = w.db.Exec(ctx, `UPDATE wdpost_proofs SET message_cid = $1 WHERE sp_id = $2 AND 
proving_period_start = $3 AND deadline = $4 AND partition = $5`, smsg.String(), spID, pps, deadline, partition) - if err != nil { - return true, xerrors.Errorf("updating wdpost_proofs: %w", err) - } - - return true, nil -} - -func (w *WdPostSubmitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - if w.sender == nil { - // we can't send messages - return nil, nil - } - - return &ids[0], nil -} - -func (w *WdPostSubmitTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 128, - Name: "WdPostSubmit", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 10 << 20, - }, - MaxFailures: 10, - Follows: nil, // ?? - } -} - -func (w *WdPostSubmitTask) Adder(taskFunc harmonytask.AddTaskFunc) { - w.submitPoStTF.Set(taskFunc) -} - -func (w *WdPostSubmitTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { - tf := w.submitPoStTF.Val(ctx) - - qry, err := w.db.Query(ctx, `SELECT sp_id, proving_period_start, deadline, partition, submit_at_epoch FROM wdpost_proofs WHERE submit_task_id IS NULL AND submit_at_epoch <= $1`, apply.Height()) - if err != nil { - return err - } - defer qry.Close() - - for qry.Next() { - var spID int64 - var pps int64 - var deadline uint64 - var partition uint64 - var submitAtEpoch uint64 - if err := qry.Scan(&spID, &pps, &deadline, &partition, &submitAtEpoch); err != nil { - return xerrors.Errorf("scan submittable posts: %w", err) - } - - tf(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { - // update in transaction iff submit_task_id is still null - res, err := tx.Exec(`UPDATE wdpost_proofs SET submit_task_id = $1 WHERE sp_id = $2 AND proving_period_start = $3 AND deadline = $4 AND partition = $5 AND submit_task_id IS NULL`, id, spID, pps, deadline, partition) - if err != nil { - return false, 
xerrors.Errorf("query ready proof: %w", err) - } - if res != 1 { - return false, nil - } - - return true, nil - }) - } - if err := qry.Err(); err != nil { - return err - } - - return nil -} - -type MsgPrepAPI interface { - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) - GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) - GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) - - WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletHas(context.Context, address.Address) (bool, error) - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) -} - -func preparePoStMessage(w MsgPrepAPI, as *multictladdr.MultiAddressSelector, maddr address.Address, msg *types.Message, maxFee abi.TokenAmount) (*types.Message, *api.MessageSendSpec, error) { - mi, err := w.StateMinerInfo(context.Background(), maddr, types.EmptyTSK) - if err != nil { - return nil, nil, xerrors.Errorf("error getting miner info: %w", err) - } - - // set the worker as a fallback - msg.From = mi.Worker - - mss := &api.MessageSendSpec{ - MaxFee: maxFee, - } - - // (optimal) initial estimation with some overestimation that guarantees - // block inclusion within the next 20 tipsets. 
- gm, err := w.GasEstimateMessageGas(context.Background(), msg, mss, types.EmptyTSK) - if err != nil { - log.Errorw("estimating gas", "error", err) - return nil, nil, xerrors.Errorf("estimating gas: %w", err) - } - *msg = *gm - - // calculate a more frugal estimation; premium is estimated to guarantee - // inclusion within 5 tipsets, and fee cap is estimated for inclusion - // within 4 tipsets. - minGasFeeMsg := *msg - - minGasFeeMsg.GasPremium, err = w.GasEstimateGasPremium(context.Background(), 5, msg.From, msg.GasLimit, types.EmptyTSK) - if err != nil { - log.Errorf("failed to estimate minimum gas premium: %+v", err) - minGasFeeMsg.GasPremium = msg.GasPremium - } - - minGasFeeMsg.GasFeeCap, err = w.GasEstimateFeeCap(context.Background(), &minGasFeeMsg, 4, types.EmptyTSK) - if err != nil { - log.Errorf("failed to estimate minimum gas fee cap: %+v", err) - minGasFeeMsg.GasFeeCap = msg.GasFeeCap - } - - // goodFunds = funds needed for optimal inclusion probability. - // minFunds = funds needed for more speculative inclusion probability. 
- goodFunds := big.Add(minGasFeeMsg.RequiredFunds(), minGasFeeMsg.Value) - minFunds := big.Min(big.Add(minGasFeeMsg.RequiredFunds(), minGasFeeMsg.Value), goodFunds) - - from, _, err := as.AddressFor(context.Background(), w, maddr, mi, api.PoStAddr, goodFunds, minFunds) - if err != nil { - return nil, nil, xerrors.Errorf("error getting address: %w", err) - } - - msg.From = from - - return msg, mss, nil -} - -var _ harmonytask.TaskInterface = &WdPostSubmitTask{} diff --git a/curiosrc/winning/winning_task.go b/curiosrc/winning/winning_task.go deleted file mode 100644 index 920a7339422..00000000000 --- a/curiosrc/winning/winning_task.go +++ /dev/null @@ -1,688 +0,0 @@ -package winning - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/binary" - "encoding/json" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/network" - prooftypes "github.com/filecoin-project/go-state-types/proof" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/gen" - lrand "github.com/filecoin-project/lotus/chain/rand" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("curio/winning") - -type WinPostTask struct { - max int - db *harmonydb.DB - - prover ProverWinningPoSt - verifier storiface.Verifier - - api WinPostAPI - actors 
map[dtypes.MinerAddress]bool - - mineTF promise.Promise[harmonytask.AddTaskFunc] -} - -type WinPostAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - - StateGetBeaconEntry(context.Context, abi.ChainEpoch) (*types.BeaconEntry, error) - SyncSubmitBlock(context.Context, *types.BlockMsg) error - StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - - MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) - MinerCreateBlock(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) - MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) - - WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) -} - -type ProverWinningPoSt interface { - GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, minerID abi.ActorID, sectorInfo []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]prooftypes.PoStProof, error) -} - -func NewWinPostTask(max int, db *harmonydb.DB, prover ProverWinningPoSt, verifier storiface.Verifier, api WinPostAPI, actors map[dtypes.MinerAddress]bool) *WinPostTask { - t := &WinPostTask{ - max: max, - db: db, - prover: prover, - verifier: verifier, - api: api, - actors: actors, - } - // TODO: run warmup - - go t.mineBasic(context.TODO()) - - return t -} - -func (t 
*WinPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - log.Debugw("WinPostTask.Do()", "taskID", taskID) - - ctx := context.TODO() - - type BlockCID struct { - CID string - } - - type MiningTaskDetails struct { - SpID uint64 - Epoch uint64 - BlockCIDs []BlockCID - CompTime time.Time - } - - var details MiningTaskDetails - - // First query to fetch from mining_tasks - err = t.db.QueryRow(ctx, `SELECT sp_id, epoch, base_compute_time FROM mining_tasks WHERE task_id = $1`, taskID).Scan(&details.SpID, &details.Epoch, &details.CompTime) - if err != nil { - return false, xerrors.Errorf("query mining base info fail: %w", err) - } - - // Second query to fetch from mining_base_block - rows, err := t.db.Query(ctx, `SELECT block_cid FROM mining_base_block WHERE task_id = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("query mining base blocks fail: %w", err) - } - defer rows.Close() - - for rows.Next() { - var cid BlockCID - if err := rows.Scan(&cid.CID); err != nil { - return false, err - } - details.BlockCIDs = append(details.BlockCIDs, cid) - } - - if err := rows.Err(); err != nil { - return false, xerrors.Errorf("query mining base blocks fail (rows.Err): %w", err) - } - - // construct base - maddr, err := address.NewIDAddress(details.SpID) - if err != nil { - return false, err - } - - var bcids []cid.Cid - for _, c := range details.BlockCIDs { - bcid, err := cid.Parse(c.CID) - if err != nil { - return false, err - } - bcids = append(bcids, bcid) - } - - tsk := types.NewTipSetKey(bcids...) 
- baseTs, err := t.api.ChainGetTipSet(ctx, tsk) - if err != nil { - return false, xerrors.Errorf("loading base tipset: %w", err) - } - - base := MiningBase{ - TipSet: baseTs, - AddRounds: abi.ChainEpoch(details.Epoch) - baseTs.Height() - 1, - ComputeTime: details.CompTime, - } - - persistNoWin := func() (bool, error) { - n, err := t.db.Exec(ctx, `UPDATE mining_base_block SET no_win = true WHERE task_id = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("marking base as not-won: %w", err) - } - log.Debugw("persisted no-win", "rows", n) - - if n == 0 { - return false, xerrors.Errorf("persist no win: no rows updated") - } - - return true, nil - } - - // ensure we have a beacon entry for the epoch we're mining on - round := base.epoch() - - _ = retry1(func() (*types.BeaconEntry, error) { - return t.api.StateGetBeaconEntry(ctx, round) - }) - - // MAKE A MINING ATTEMPT!! - log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids()), "null-rounds", base.AddRounds) - - mbi, err := t.api.MinerGetBaseInfo(ctx, maddr, round, base.TipSet.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get mining base info: %w", err) - } - if mbi == nil { - // not eligible to mine on this base, we're done here - log.Debugw("WinPoSt not eligible to mine on this base", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() - } - - if !mbi.EligibleForMining { - // slashed or just have no power yet, we're done here - log.Debugw("WinPoSt not eligible for mining", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() - } - - if len(mbi.Sectors) == 0 { - log.Warnw("WinPoSt no sectors to mine", "tipset", types.LogCids(base.TipSet.Cids())) - return false, xerrors.Errorf("no sectors selected for winning PoSt") - } - - var rbase types.BeaconEntry - var bvals []types.BeaconEntry - var eproof *types.ElectionProof - - // winner check - { - bvals = mbi.BeaconEntries - rbase = mbi.PrevBeaconEntry - if len(bvals) > 0 { - rbase 
= bvals[len(bvals)-1] - } - - eproof, err = gen.IsRoundWinner(ctx, round, maddr, rbase, mbi, t.api) - if err != nil { - log.Warnw("WinPoSt failed to check if we win next round", "error", err) - return false, xerrors.Errorf("failed to check if we win next round: %w", err) - } - - if eproof == nil { - // not a winner, we're done here - log.Debugw("WinPoSt not a winner", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() - } - } - - log.Infow("WinPostTask won election", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "eproof", eproof) - - // winning PoSt - var wpostProof []prooftypes.PoStProof - { - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - err = xerrors.Errorf("failed to marshal miner address: %w", err) - return false, err - } - - brand, err := lrand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes()) - if err != nil { - err = xerrors.Errorf("failed to get randomness for winning post: %w", err) - return false, err - } - - prand := abi.PoStRandomness(brand) - prand[31] &= 0x3f // make into fr - - sectorNums := make([]abi.SectorNumber, len(mbi.Sectors)) - for i, s := range mbi.Sectors { - sectorNums[i] = s.SectorNumber - } - - ppt, err := mbi.Sectors[0].SealProof.RegisteredWinningPoStProof() - if err != nil { - return false, xerrors.Errorf("mapping sector seal proof type to post proof type: %w", err) - } - - postChallenges, err := ffi.GeneratePoStFallbackSectorChallenges(ppt, abi.ActorID(details.SpID), prand, sectorNums) - if err != nil { - return false, xerrors.Errorf("generating election challenges: %v", err) - } - - sectorChallenges := make([]storiface.PostSectorChallenge, len(mbi.Sectors)) - for i, s := range mbi.Sectors { - sectorChallenges[i] = storiface.PostSectorChallenge{ - SealProof: s.SealProof, - SectorNumber: s.SectorNumber, - SealedCID: s.SealedCID, - Challenge: postChallenges.Challenges[s.SectorNumber], - Update: 
s.SectorKey != nil, - } - } - - wpostProof, err = t.prover.GenerateWinningPoSt(ctx, ppt, abi.ActorID(details.SpID), sectorChallenges, prand) - if err != nil { - err = xerrors.Errorf("failed to compute winning post proof: %w", err) - return false, err - } - } - - log.Infow("WinPostTask winning PoSt computed", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "proofs", wpostProof) - - ticket, err := t.computeTicket(ctx, maddr, &rbase, round, base.TipSet.MinTicket(), mbi) - if err != nil { - return false, xerrors.Errorf("scratching ticket failed: %w", err) - } - - // get pending messages early, - msgs, err := t.api.MpoolSelect(ctx, base.TipSet.Key(), ticket.Quality()) - if err != nil { - return false, xerrors.Errorf("failed to select messages for block: %w", err) - } - - log.Infow("WinPostTask selected messages", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "messages", len(msgs)) - - // equivocation handling - { - // This next block exists to "catch" equivocating miners, - // who submit 2 blocks at the same height at different times in order to split the network. - // To safeguard against this, we make sure it's been EquivocationDelaySecs since our base was calculated, - // then re-calculate it. - // If the daemon detected equivocated blocks, those blocks will no longer be in the new base. 
- time.Sleep(time.Until(base.ComputeTime.Add(time.Duration(build.EquivocationDelaySecs) * time.Second))) - - bestTs, err := t.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("failed to get chain head: %w", err) - } - - headWeight, err := t.api.ChainTipSetWeight(ctx, bestTs.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get chain head weight: %w", err) - } - - baseWeight, err := t.api.ChainTipSetWeight(ctx, base.TipSet.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get base weight: %w", err) - } - if types.BigCmp(headWeight, baseWeight) <= 0 { - bestTs = base.TipSet - } - - // If the base has changed, we take the _intersection_ of our old base and new base, - // thus ejecting blocks from any equivocating miners, without taking any new blocks. - if bestTs.Height() == base.TipSet.Height() && !bestTs.Equals(base.TipSet) { - log.Warnf("base changed from %s to %s, taking intersection", base.TipSet.Key(), bestTs.Key()) - newBaseMap := map[cid.Cid]struct{}{} - for _, newBaseBlk := range bestTs.Cids() { - newBaseMap[newBaseBlk] = struct{}{} - } - - refreshedBaseBlocks := make([]*types.BlockHeader, 0, len(base.TipSet.Cids())) - for _, baseBlk := range base.TipSet.Blocks() { - if _, ok := newBaseMap[baseBlk.Cid()]; ok { - refreshedBaseBlocks = append(refreshedBaseBlocks, baseBlk) - } - } - - if len(refreshedBaseBlocks) != 0 && len(refreshedBaseBlocks) != len(base.TipSet.Blocks()) { - refreshedBase, err := types.NewTipSet(refreshedBaseBlocks) - if err != nil { - return false, xerrors.Errorf("failed to create new tipset when refreshing: %w", err) - } - - if !base.TipSet.MinTicket().Equals(refreshedBase.MinTicket()) { - log.Warn("recomputing ticket due to base refresh") - - ticket, err = t.computeTicket(ctx, maddr, &rbase, round, refreshedBase.MinTicket(), mbi) - if err != nil { - return false, xerrors.Errorf("failed to refresh ticket: %w", err) - } - } - - log.Warn("re-selecting messages due to base refresh") - // 
refresh messages, as the selected messages may no longer be valid - msgs, err = t.api.MpoolSelect(ctx, refreshedBase.Key(), ticket.Quality()) - if err != nil { - return false, xerrors.Errorf("failed to re-select messages for block: %w", err) - } - - base.TipSet = refreshedBase - } - } - } - - log.Infow("WinPostTask base ready", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "ticket", ticket) - - // block construction - var blockMsg *types.BlockMsg - { - uts := base.TipSet.MinTimestamp() + build.BlockDelaySecs*(uint64(base.AddRounds)+1) - - blockMsg, err = t.api.MinerCreateBlock(context.TODO(), &api.BlockTemplate{ - Miner: maddr, - Parents: base.TipSet.Key(), - Ticket: ticket, - Eproof: eproof, - BeaconValues: bvals, - Messages: msgs, - Epoch: round, - Timestamp: uts, - WinningPoStProof: wpostProof, - }) - if err != nil { - return false, xerrors.Errorf("failed to create block: %w", err) - } - } - - log.Infow("WinPostTask block ready", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "block", blockMsg.Header.Cid(), "timestamp", blockMsg.Header.Timestamp) - - // persist in db - { - bhjson, err := json.Marshal(blockMsg.Header) - if err != nil { - return false, xerrors.Errorf("failed to marshal block header: %w", err) - } - - _, err = t.db.Exec(ctx, `UPDATE mining_tasks - SET won = true, mined_cid = $2, mined_header = $3, mined_at = $4 - WHERE task_id = $1`, taskID, blockMsg.Header.Cid(), string(bhjson), time.Now().UTC()) - if err != nil { - return false, xerrors.Errorf("failed to update mining task: %w", err) - } - } - - // wait until block timestamp - { - log.Infow("WinPostTask waiting for block timestamp", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "block", blockMsg.Header.Cid(), "until", time.Unix(int64(blockMsg.Header.Timestamp), 0)) - time.Sleep(time.Until(time.Unix(int64(blockMsg.Header.Timestamp), 0))) - } - - // submit block!! 
- { - log.Infow("WinPostTask submitting block", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "block", blockMsg.Header.Cid()) - if err := t.api.SyncSubmitBlock(ctx, blockMsg); err != nil { - return false, xerrors.Errorf("failed to submit block: %w", err) - } - } - - log.Infow("mined a block", "tipset", types.LogCids(blockMsg.Header.Parents), "height", blockMsg.Header.Height, "miner", maddr, "cid", blockMsg.Header.Cid()) - - // persist that we've submitted the block - { - _, err = t.db.Exec(ctx, `UPDATE mining_tasks - SET submitted_at = $2 - WHERE task_id = $1`, taskID, time.Now().UTC()) - if err != nil { - return false, xerrors.Errorf("failed to update mining task: %w", err) - } - } - - return true, nil -} - -func (t *WinPostTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - // select lowest epoch - var lowestEpoch abi.ChainEpoch - var lowestEpochID = ids[0] - for _, id := range ids { - var epoch uint64 - err := t.db.QueryRow(context.Background(), `SELECT epoch FROM mining_tasks WHERE task_id = $1`, id).Scan(&epoch) - if err != nil { - return nil, err - } - - if lowestEpoch == 0 || abi.ChainEpoch(epoch) < lowestEpoch { - lowestEpoch = abi.ChainEpoch(epoch) - lowestEpochID = id - } - } - - return &lowestEpochID, nil -} - -func (t *WinPostTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Name: "WinPost", - Max: t.max, - MaxFailures: 3, - Follows: nil, - Cost: resources.Resources{ - Cpu: 1, - - // todo set to something for 32/64G sector sizes? 
Technically windowPoSt is happy on a CPU - // but it will use a GPU if available - Gpu: 0, - - Ram: 1 << 30, // todo arbitrary number - }, - } -} - -func (t *WinPostTask) Adder(taskFunc harmonytask.AddTaskFunc) { - t.mineTF.Set(taskFunc) -} - -// MiningBase is the tipset on top of which we plan to construct our next block. -// Refer to godocs on GetBestMiningCandidate. -type MiningBase struct { - TipSet *types.TipSet - ComputeTime time.Time - AddRounds abi.ChainEpoch -} - -func (mb MiningBase) epoch() abi.ChainEpoch { - // return the epoch that will result from mining on this base - return mb.TipSet.Height() + mb.AddRounds + 1 -} - -func (mb MiningBase) baseTime() time.Time { - tsTime := time.Unix(int64(mb.TipSet.MinTimestamp()), 0) - roundDelay := build.BlockDelaySecs * uint64(mb.AddRounds+1) - tsTime = tsTime.Add(time.Duration(roundDelay) * time.Second) - return tsTime -} - -func (mb MiningBase) afterPropDelay() time.Time { - return mb.baseTime().Add(time.Duration(build.PropagationDelaySecs) * time.Second).Add(randTimeOffset(time.Second)) -} - -func (t *WinPostTask) mineBasic(ctx context.Context) { - var workBase MiningBase - - taskFn := t.mineTF.Val(ctx) - - // initialize workbase - { - head := retry1(func() (*types.TipSet, error) { - return t.api.ChainHead(ctx) - }) - - workBase = MiningBase{ - TipSet: head, - AddRounds: 0, - ComputeTime: time.Now(), - } - } - - /* - - /- T+0 == workBase.baseTime - | - >--------*------*--------[wait until next round]-----> - | - |- T+PD == workBase.afterPropDelay+(~1s) - |- Here we acquire the new workBase, and start a new round task - \- Then we loop around, and wait for the next head - - time --> - */ - - for { - // limit the rate at which we mine blocks to at least EquivocationDelaySecs - // this is to prevent races on devnets in catch up mode. Acts as a minimum - // delay for the sleep below. 
- time.Sleep(time.Duration(build.EquivocationDelaySecs)*time.Second + time.Second) - - // wait for *NEXT* propagation delay - time.Sleep(time.Until(workBase.afterPropDelay())) - - // check current best candidate - maybeBase := retry1(func() (*types.TipSet, error) { - return t.api.ChainHead(ctx) - }) - - if workBase.TipSet.Equals(maybeBase) { - // workbase didn't change in the new round so we have a null round here - workBase.AddRounds++ - log.Debugw("workbase update", "tipset", workBase.TipSet.Cids(), "nulls", workBase.AddRounds, "lastUpdate", time.Since(workBase.ComputeTime), "type", "same-tipset") - } else { - btsw := retry1(func() (types.BigInt, error) { - return t.api.ChainTipSetWeight(ctx, maybeBase.Key()) - }) - - ltsw := retry1(func() (types.BigInt, error) { - return t.api.ChainTipSetWeight(ctx, workBase.TipSet.Key()) - }) - - if types.BigCmp(btsw, ltsw) <= 0 { - // new tipset for some reason has less weight than the old one, assume null round here - // NOTE: the backing node may have reorged, or manually changed head - workBase.AddRounds++ - log.Debugw("workbase update", "tipset", workBase.TipSet.Cids(), "nulls", workBase.AddRounds, "lastUpdate", time.Since(workBase.ComputeTime), "type", "prefer-local-weight") - } else { - // new tipset has more weight, so we should mine on it, no null round here - log.Debugw("workbase update", "tipset", workBase.TipSet.Cids(), "nulls", workBase.AddRounds, "lastUpdate", time.Since(workBase.ComputeTime), "type", "prefer-new-tipset") - - workBase = MiningBase{ - TipSet: maybeBase, - AddRounds: 0, - ComputeTime: time.Now(), - } - } - } - - // dispatch mining task - // (note equivocation prevention is handled by the mining code) - - baseEpoch := workBase.TipSet.Height() - - for act := range t.actors { - spID, err := address.IDFromAddress(address.Address(act)) - if err != nil { - log.Errorf("failed to get spID from address %s: %s", act, err) - continue - } - - taskFn(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit 
bool, seriousError error) { - // First we check if the mining base includes blocks we may have mined previously to avoid getting slashed - // select mining_tasks where epoch==base_epoch if win=true to maybe get base block cid which has to be included in our tipset - var baseBlockCids []string - err := tx.Select(&baseBlockCids, `SELECT mined_cid FROM mining_tasks WHERE epoch = $1 AND sp_id = $2 AND won = true`, baseEpoch, spID) - if err != nil { - return false, xerrors.Errorf("querying mining_tasks: %w", err) - } - if len(baseBlockCids) >= 1 { - baseBlockCid := baseBlockCids[0] - c, err := cid.Parse(baseBlockCid) - if err != nil { - return false, xerrors.Errorf("parsing mined_cid: %w", err) - } - - // we have mined in the previous round, make sure that our block is included in the tipset - // if it's not we risk getting slashed - - var foundOurs bool - for _, c2 := range workBase.TipSet.Cids() { - if c == c2 { - foundOurs = true - break - } - } - if !foundOurs { - log.Errorw("our block was not included in the tipset, aborting", "tipset", workBase.TipSet.Cids(), "ourBlock", c) - return false, xerrors.Errorf("our block was not included in the tipset, aborting") - } - } - - _, err = tx.Exec(`INSERT INTO mining_tasks (task_id, sp_id, epoch, base_compute_time) VALUES ($1, $2, $3, $4)`, id, spID, workBase.epoch(), workBase.ComputeTime.UTC()) - if err != nil { - return false, xerrors.Errorf("inserting mining_tasks: %w", err) - } - - for _, c := range workBase.TipSet.Cids() { - _, err = tx.Exec(`INSERT INTO mining_base_block (task_id, sp_id, block_cid) VALUES ($1, $2, $3)`, id, spID, c) - if err != nil { - return false, xerrors.Errorf("inserting mining base blocks: %w", err) - } - } - - return true, nil // no errors, commit the transaction - }) - } - } -} - -func (t *WinPostTask) computeTicket(ctx context.Context, maddr address.Address, brand *types.BeaconEntry, round abi.ChainEpoch, chainRand *types.Ticket, mbi *api.MiningBaseInfo) (*types.Ticket, error) { - buf := 
new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err) - } - - if round > build.UpgradeSmokeHeight { - buf.Write(chainRand.VRFProof) - } - - input, err := lrand.DrawRandomnessFromBase(brand.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes()) - if err != nil { - return nil, err - } - - vrfOut, err := gen.ComputeVRF(ctx, t.api.WalletSign, mbi.WorkerKey, input) - if err != nil { - return nil, err - } - - return &types.Ticket{ - VRFProof: vrfOut, - }, nil -} - -func randTimeOffset(width time.Duration) time.Duration { - buf := make([]byte, 8) - _, _ = rand.Reader.Read(buf) - val := time.Duration(binary.BigEndian.Uint64(buf) % uint64(width)) - - return val - (width / 2) -} - -func retry1[R any](f func() (R, error)) R { - for { - r, err := f() - if err == nil { - return r - } - - log.Errorw("error in mining loop, retrying", "error", err) - time.Sleep(time.Second) - } -} - -var _ harmonytask.TaskInterface = &WinPostTask{} diff --git a/documentation/en/api-v0-methods-curio.md b/documentation/en/api-v0-methods-curio.md deleted file mode 100644 index 0bfe09af5cb..00000000000 --- a/documentation/en/api-v0-methods-curio.md +++ /dev/null @@ -1,369 +0,0 @@ -# Groups -* [](#) - * [Shutdown](#Shutdown) - * [Version](#Version) -* [Allocate](#Allocate) - * [AllocatePieceToSector](#AllocatePieceToSector) -* [Log](#Log) - * [LogList](#LogList) - * [LogSetLevel](#LogSetLevel) -* [Storage](#Storage) - * [StorageAddLocal](#StorageAddLocal) - * [StorageDetachLocal](#StorageDetachLocal) - * [StorageFindSector](#StorageFindSector) - * [StorageInfo](#StorageInfo) - * [StorageInit](#StorageInit) - * [StorageList](#StorageList) - * [StorageLocal](#StorageLocal) - * [StorageStat](#StorageStat) -## - - -### Shutdown -Trigger shutdown - - -Perms: admin - -Inputs: `null` - -Response: `{}` - -### Version - - -Perms: admin - -Inputs: `null` - -Response: `131840` - 
-## Allocate - - -### AllocatePieceToSector - - -Perms: write - -Inputs: -```json -[ - "f01234", - { - "PublishCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DealID": 5432, - "DealProposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "DealSchedule": { - "StartEpoch": 10101, - "EndEpoch": 10101 - }, - "PieceActivationManifest": { - "CID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 2032, - "VerifiedAllocationKey": null, - "Notify": null - }, - "KeepUnsealed": true - }, - 9, - { - "Scheme": "string value", - "Opaque": "string value", - "User": {}, - "Host": "string value", - "Path": "string value", - "RawPath": "string value", - "OmitHost": true, - "ForceQuery": true, - "RawQuery": "string value", - "Fragment": "string value", - "RawFragment": "string value" - }, - { - "Authorization": [ - "Bearer ey.." 
- ] - } -] -``` - -Response: -```json -{ - "Sector": 9, - "Offset": 1032 -} -``` - -## Log - - -### LogList - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - "string value" -] -``` - -### LogSetLevel - - -Perms: admin - -Inputs: -```json -[ - "string value", - "string value" -] -``` - -Response: `{}` - -## Storage - - -### StorageAddLocal - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### StorageDetachLocal - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### StorageFindSector - - -Perms: admin - -Inputs: -```json -[ - { - "Miner": 1000, - "Number": 9 - }, - 1, - 34359738368, - true -] -``` - -Response: -```json -[ - { - "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": [ - "string value" - ], - "BaseURLs": [ - "string value" - ], - "Weight": 42, - "CanSeal": true, - "CanStore": true, - "Primary": true, - "AllowTypes": [ - "string value" - ], - "DenyTypes": [ - "string value" - ], - "AllowMiners": [ - "string value" - ], - "DenyMiners": [ - "string value" - ] - } -] -``` - -### StorageInfo - - -Perms: admin - -Inputs: -```json -[ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" -] -``` - -Response: -```json -{ - "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": [ - "string value" - ], - "Weight": 42, - "MaxStorage": 42, - "CanSeal": true, - "CanStore": true, - "Groups": [ - "string value" - ], - "AllowTo": [ - "string value" - ], - "AllowTypes": [ - "string value" - ], - "DenyTypes": [ - "string value" - ], - "AllowMiners": [ - "string value" - ], - "DenyMiners": [ - "string value" - ] -} -``` - -### StorageInit - - -Perms: admin - -Inputs: -```json -[ - "string value", - { - "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "Weight": 42, - "CanSeal": true, - "CanStore": true, - "MaxStorage": 42, - "Groups": [ - "string value" - ], - "AllowTo": [ - "string value" - ], - "AllowTypes": [ - "string value" - ], - "DenyTypes": [ - "string value" - ], - "AllowMiners": [ - "string 
value" - ], - "DenyMiners": [ - "string value" - ] - } -] -``` - -Response: `{}` - -### StorageList - - -Perms: admin - -Inputs: `null` - -Response: -```json -{ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": [ - { - "Miner": 1000, - "Number": 100, - "SectorFileType": 2 - } - ] -} -``` - -### StorageLocal - - -Perms: admin - -Inputs: `null` - -Response: -```json -{ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path" -} -``` - -### StorageStat - - -Perms: admin - -Inputs: -```json -[ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" -] -``` - -Response: -```json -{ - "Capacity": 9, - "Available": 9, - "FSAvailable": 9, - "Reserved": 9, - "Max": 9, - "Used": 9 -} -``` - diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 802cd3ce591..0bf4d3325c8 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -23,89 +23,14 @@ * [ComputeWindowPoSt](#ComputeWindowPoSt) * [Create](#Create) * [CreateBackup](#CreateBackup) -* [Dagstore](#Dagstore) - * [DagstoreGC](#DagstoreGC) - * [DagstoreInitializeAll](#DagstoreInitializeAll) - * [DagstoreInitializeShard](#DagstoreInitializeShard) - * [DagstoreListShards](#DagstoreListShards) - * [DagstoreLookupPieces](#DagstoreLookupPieces) - * [DagstoreRecoverShard](#DagstoreRecoverShard) - * [DagstoreRegisterShard](#DagstoreRegisterShard) -* [Deals](#Deals) - * [DealsConsiderOfflineRetrievalDeals](#DealsConsiderOfflineRetrievalDeals) - * [DealsConsiderOfflineStorageDeals](#DealsConsiderOfflineStorageDeals) - * [DealsConsiderOnlineRetrievalDeals](#DealsConsiderOnlineRetrievalDeals) - * [DealsConsiderOnlineStorageDeals](#DealsConsiderOnlineStorageDeals) - * [DealsConsiderUnverifiedStorageDeals](#DealsConsiderUnverifiedStorageDeals) - * [DealsConsiderVerifiedStorageDeals](#DealsConsiderVerifiedStorageDeals) - * [DealsImportData](#DealsImportData) - * [DealsList](#DealsList) - * [DealsPieceCidBlocklist](#DealsPieceCidBlocklist) - * 
[DealsSetConsiderOfflineRetrievalDeals](#DealsSetConsiderOfflineRetrievalDeals) - * [DealsSetConsiderOfflineStorageDeals](#DealsSetConsiderOfflineStorageDeals) - * [DealsSetConsiderOnlineRetrievalDeals](#DealsSetConsiderOnlineRetrievalDeals) - * [DealsSetConsiderOnlineStorageDeals](#DealsSetConsiderOnlineStorageDeals) - * [DealsSetConsiderUnverifiedStorageDeals](#DealsSetConsiderUnverifiedStorageDeals) - * [DealsSetConsiderVerifiedStorageDeals](#DealsSetConsiderVerifiedStorageDeals) - * [DealsSetPieceCidBlocklist](#DealsSetPieceCidBlocklist) -* [I](#I) - * [ID](#ID) -* [Indexer](#Indexer) - * [IndexerAnnounceAllDeals](#IndexerAnnounceAllDeals) - * [IndexerAnnounceDeal](#IndexerAnnounceDeal) * [Log](#Log) * [LogAlerts](#LogAlerts) * [LogList](#LogList) * [LogSetLevel](#LogSetLevel) * [Market](#Market) - * [MarketCancelDataTransfer](#MarketCancelDataTransfer) - * [MarketDataTransferDiagnostics](#MarketDataTransferDiagnostics) - * [MarketDataTransferUpdates](#MarketDataTransferUpdates) - * [MarketGetAsk](#MarketGetAsk) - * [MarketGetDealUpdates](#MarketGetDealUpdates) - * [MarketGetRetrievalAsk](#MarketGetRetrievalAsk) - * [MarketImportDealData](#MarketImportDealData) - * [MarketListDataTransfers](#MarketListDataTransfers) * [MarketListDeals](#MarketListDeals) - * [MarketListIncompleteDeals](#MarketListIncompleteDeals) - * [MarketListRetrievalDeals](#MarketListRetrievalDeals) - * [MarketPendingDeals](#MarketPendingDeals) - * [MarketPublishPendingDeals](#MarketPublishPendingDeals) - * [MarketRestartDataTransfer](#MarketRestartDataTransfer) - * [MarketRetryPublishDeal](#MarketRetryPublishDeal) - * [MarketSetAsk](#MarketSetAsk) - * [MarketSetRetrievalAsk](#MarketSetRetrievalAsk) * [Mining](#Mining) * [MiningBase](#MiningBase) -* [Net](#Net) - * [NetAddrsListen](#NetAddrsListen) - * [NetAgentVersion](#NetAgentVersion) - * [NetAutoNatStatus](#NetAutoNatStatus) - * [NetBandwidthStats](#NetBandwidthStats) - * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) - * 
[NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) - * [NetBlockAdd](#NetBlockAdd) - * [NetBlockList](#NetBlockList) - * [NetBlockRemove](#NetBlockRemove) - * [NetConnect](#NetConnect) - * [NetConnectedness](#NetConnectedness) - * [NetDisconnect](#NetDisconnect) - * [NetFindPeer](#NetFindPeer) - * [NetLimit](#NetLimit) - * [NetPeerInfo](#NetPeerInfo) - * [NetPeers](#NetPeers) - * [NetPing](#NetPing) - * [NetProtectAdd](#NetProtectAdd) - * [NetProtectList](#NetProtectList) - * [NetProtectRemove](#NetProtectRemove) - * [NetPubsubScores](#NetPubsubScores) - * [NetSetLimit](#NetSetLimit) - * [NetStat](#NetStat) -* [Pieces](#Pieces) - * [PiecesGetCIDInfo](#PiecesGetCIDInfo) - * [PiecesGetPieceInfo](#PiecesGetPieceInfo) - * [PiecesListCidInfos](#PiecesListCidInfos) - * [PiecesListPieces](#PiecesListPieces) * [Pledge](#Pledge) * [PledgeSector](#PledgeSector) * [Recover](#Recover) @@ -475,7 +400,7 @@ Inputs: ], "Bw==", 10101, - 22 + 23 ] ``` @@ -556,11 +481,10 @@ Inputs: Response: `{}` -## Dagstore +## Log -### DagstoreGC -DagstoreGC runs garbage collection on the DAG store. +### LogAlerts Perms: admin @@ -571,236 +495,61 @@ Response: ```json [ { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "Success": false, - "Error": "\u003cerror\u003e" - } -] -``` - -### DagstoreInitializeAll -DagstoreInitializeAll initializes all uninitialized shards in bulk, -according to the policy passed in the parameters. - -It is recommended to set a maximum concurrency to avoid extreme -IO pressure if the storage subsystem has a large amount of deals. - -It returns a stream of events to report progress. 
- - -Perms: write - -Inputs: -```json -[ - { - "MaxConcurrency": 123, - "IncludeSealed": true + "Type": { + "System": "string value", + "Subsystem": "string value" + }, + "Active": true, + "LastActive": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + }, + "LastResolved": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + } } ] ``` -Response: -```json -{ - "Key": "string value", - "Event": "string value", - "Success": true, - "Error": "string value", - "Total": 123, - "Current": 123 -} -``` - -### DagstoreInitializeShard -DagstoreInitializeShard initializes an uninitialized shard. - -Initialization consists of fetching the shard's data (deal payload) from -the storage subsystem, generating an index, and persisting the index -to facilitate later retrievals, and/or to publish to external sources. - -This operation is intended to complement the initial migration. The -migration registers a shard for every unique piece CID, with lazy -initialization. Thus, shards are not initialized immediately to avoid -IO activity competing with proving. Instead, shard are initialized -when first accessed. This method forces the initialization of a shard by -accessing it and immediately releasing it. This is useful to warm up the -cache to facilitate subsequent retrievals, and to generate the indexes -to publish them externally. - -This operation fails if the shard is not in ShardStateNew state. -It blocks until initialization finishes. +### LogList Perms: write -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### DagstoreListShards -DagstoreListShards returns information about all shards known to the -DAG store. Only available on nodes running the markets subsystem. 
- - -Perms: read - Inputs: `null` Response: ```json -[ - { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "State": "ShardStateAvailable", - "Error": "\u003cerror\u003e" - } -] -``` - -### DagstoreLookupPieces -DagstoreLookupPieces returns information about shards that contain the given CID. - - -Perms: admin - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -[ - { - "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", - "State": "ShardStateAvailable", - "Error": "\u003cerror\u003e" - } -] -``` - -### DagstoreRecoverShard -DagstoreRecoverShard attempts to recover a failed shard. - -This operation fails if the shard is not in ShardStateErrored state. -It blocks until recovery finishes. If recovery failed, it returns the -error. - - -Perms: write - -Inputs: -```json [ "string value" ] ``` -Response: `{}` - -### DagstoreRegisterShard -DagstoreRegisterShard registers a shard manually with dagstore with given pieceCID +### LogSetLevel -Perms: admin +Perms: write Inputs: ```json [ + "string value", "string value" ] ``` Response: `{}` -## Deals - - -### DealsConsiderOfflineRetrievalDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderOfflineStorageDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderOnlineRetrievalDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderOnlineStorageDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderUnverifiedStorageDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsConsiderVerifiedStorageDeals - - -Perms: admin - -Inputs: `null` - -Response: `true` - -### DealsImportData - - -Perms: admin - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "string value" -] -``` +## Market -Response: `{}` -### DealsList +### 
MarketListDeals -Perms: admin +Perms: read Inputs: `null` @@ -824,6 +573,7 @@ Response: "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -832,1431 +582,25 @@ Response: ] ``` -### DealsPieceCidBlocklist - - -Perms: admin - -Inputs: `null` - -Response: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -### DealsSetConsiderOfflineRetrievalDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderOfflineStorageDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderOnlineRetrievalDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderOnlineStorageDeals - - -Perms: admin - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetConsiderUnverifiedStorageDeals - - -Perms: admin +## Mining -Inputs: -```json -[ - true -] -``` -Response: `{}` +### MiningBase -### DealsSetConsiderVerifiedStorageDeals +Perms: read -Perms: admin +Inputs: `null` -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### DealsSetPieceCidBlocklist - - -Perms: admin - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - ] -] -``` - -Response: `{}` - -## I - - -### ID - - -Perms: read - -Inputs: `null` - -Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` - -## Indexer - - -### IndexerAnnounceAllDeals -IndexerAnnounceAllDeals informs the indexer nodes aboutall active deals. 
- - -Perms: admin - -Inputs: `null` - -Response: `{}` - -### IndexerAnnounceDeal -IndexerAnnounceDeal informs indexer nodes that a new deal was received, -so they can download its index - - -Perms: admin - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `{}` - -## Log - - -### LogAlerts - - -Perms: admin - -Inputs: `null` - -Response: -```json -[ - { - "Type": { - "System": "string value", - "Subsystem": "string value" - }, - "Active": true, - "LastActive": { - "Type": "string value", - "Message": "json raw message", - "Time": "0001-01-01T00:00:00Z" - }, - "LastResolved": { - "Type": "string value", - "Message": "json raw message", - "Time": "0001-01-01T00:00:00Z" - } - } -] -``` - -### LogList - - -Perms: write - -Inputs: `null` - -Response: -```json -[ - "string value" -] -``` - -### LogSetLevel - - -Perms: write - -Inputs: -```json -[ - "string value", - "string value" -] -``` - -Response: `{}` - -## Market - - -### MarketCancelDataTransfer -MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer - - -Perms: write - -Inputs: -```json -[ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - true -] -``` - -Response: `{}` - -### MarketDataTransferDiagnostics -MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync - - -Perms: write - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: -```json -{ - "ReceivingTransfers": [ - { - "RequestID": {}, - "RequestState": "string value", - "IsCurrentChannelRequest": true, - "ChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "ChannelState": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - 
"IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Diagnostics": [ - "string value" - ] - } - ], - "SendingTransfers": [ - { - "RequestID": {}, - "RequestState": "string value", - "IsCurrentChannelRequest": true, - "ChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "ChannelState": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Diagnostics": [ - "string value" - ] - } - ] -} -``` - -### MarketDataTransferUpdates - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": 
"0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } -} -``` - -### MarketGetAsk - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Ask": { - "Price": "0", - "VerifiedPrice": "0", - "MinPieceSize": 1032, - "MaxPieceSize": 1032, - "Miner": "f01234", - "Timestamp": 10101, - "Expiry": 10101, - "SeqNo": 42 - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } -} -``` - -### MarketGetDealUpdates - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "AddFundsCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PublishCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "State": 42, - "PiecePath": ".lotusminer/fstmp123", - "MetadataPath": ".lotusminer/fstmp123", - "SlashEpoch": 10101, - "FastRetrieval": true, - "Message": "string value", - "FundsReserved": "0", - "Ref": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "AvailableForRetrieval": true, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", 
- "TransferChannelId": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "SectorNumber": 9, - "InboundCAR": "string value" -} -``` - -### MarketGetRetrievalAsk - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "PricePerByte": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42 -} -``` - -### MarketImportDealData - - -Perms: write - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "string value" -] -``` - -Response: `{}` - -### MarketListDataTransfers - - -Perms: write - -Inputs: `null` - -Response: -```json -[ - { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -] -``` - -### MarketListDeals - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "State": { - "SectorStartEpoch": 10101, - "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101 - } - } -] -``` - -### MarketListIncompleteDeals - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - { - "Proposal": { - 
"PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "AddFundsCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PublishCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "State": 42, - "PiecePath": ".lotusminer/fstmp123", - "MetadataPath": ".lotusminer/fstmp123", - "SlashEpoch": 10101, - "FastRetrieval": true, - "Message": "string value", - "FundsReserved": "0", - "Ref": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "AvailableForRetrieval": true, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "TransferChannelId": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "SectorNumber": 9, - "InboundCAR": "string value" - } -] -``` - -### MarketListRetrievalDeals -MarketListRetrievalDeals is deprecated, returns empty list - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - {} -] -``` - -### MarketPendingDeals - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "Deals": [ - { - "Proposal": { - "PieceCID": { - "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "ClientSignature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - } - ], - "PublishPeriodStart": "0001-01-01T00:00:00Z", - "PublishPeriod": 60000000000 -} -``` - -### MarketPublishPendingDeals - - -Perms: admin - -Inputs: `null` - -Response: `{}` - -### MarketRestartDataTransfer -MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - - -Perms: write - -Inputs: -```json -[ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - true -] -``` - -Response: `{}` - -### MarketRetryPublishDeal - - -Perms: admin - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `{}` - -### MarketSetAsk - - -Perms: admin - -Inputs: -```json -[ - "0", - "0", - 10101, - 1032, - 1032 -] -``` - -Response: `{}` - -### MarketSetRetrievalAsk - - -Perms: admin - -Inputs: -```json -[ - { - "PricePerByte": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42 - } -] -``` - -Response: `{}` - -## Mining - - -### MiningBase - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Cids": null, - "Blocks": null, - "Height": 0 -} -``` - -## Net - - -### NetAddrsListen - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [ - "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" - ] -} -``` - -### NetAgentVersion - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: `"string value"` - -### NetAutoNatStatus - - -Perms: read - -Inputs: `null` - -Response: 
-```json -{ - "Reachability": 1, - "PublicAddrs": [ - "string value" - ] -} -``` - -### NetBandwidthStats - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "TotalIn": 9, - "TotalOut": 9, - "RateIn": 12.3, - "RateOut": 12.3 -} -``` - -### NetBandwidthStatsByPeer - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { - "TotalIn": 174000, - "TotalOut": 12500, - "RateIn": 100, - "RateOut": 50 - } -} -``` - -### NetBandwidthStatsByProtocol - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "/fil/hello/1.0.0": { - "TotalIn": 174000, - "TotalOut": 12500, - "RateIn": 100, - "RateOut": 50 - } -} -``` - -### NetBlockAdd - - -Perms: admin - -Inputs: -```json -[ - { - "Peers": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "IPAddrs": [ - "string value" - ], - "IPSubnets": [ - "string value" - ] - } -] -``` - -Response: `{}` - -### NetBlockList - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Peers": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "IPAddrs": [ - "string value" - ], - "IPSubnets": [ - "string value" - ] -} -``` - -### NetBlockRemove - - -Perms: admin - -Inputs: -```json -[ - { - "Peers": [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ], - "IPAddrs": [ - "string value" - ], - "IPSubnets": [ - "string value" - ] - } -] -``` - -Response: `{}` - -### NetConnect - - -Perms: write - -Inputs: -```json -[ - { - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [ - "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" - ] - } -] -``` - -Response: `{}` - -### NetConnectedness - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: `1` - -### NetDisconnect - - -Perms: write - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: `{}` - -### NetFindPeer - - -Perms: 
read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: -```json -{ - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [ - "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" - ] -} -``` - -### NetLimit - - -Perms: read - -Inputs: -```json -[ - "string value" -] -``` - -Response: -```json -{ - "Memory": 123, - "Streams": 3, - "StreamsInbound": 1, - "StreamsOutbound": 2, - "Conns": 4, - "ConnsInbound": 3, - "ConnsOutbound": 4, - "FD": 5 -} -``` - -### NetPeerInfo - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: -```json -{ - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Agent": "string value", - "Addrs": [ - "string value" - ], - "Protocols": [ - "string value" - ], - "ConnMgrMeta": { - "FirstSeen": "0001-01-01T00:00:00Z", - "Value": 123, - "Tags": { - "name": 42 - }, - "Conns": { - "name": "2021-03-08T22:52:18Z" - } - } -} -``` - -### NetPeers - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - { - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [ - "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" - ] - } -] -``` - -### NetPing - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: `60000000000` - -### NetProtectAdd - - -Perms: admin - -Inputs: -```json -[ - [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ] -] -``` - -Response: `{}` - -### NetProtectList - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -### NetProtectRemove - - -Perms: admin - -Inputs: -```json -[ - [ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - ] -] -``` - -Response: `{}` - -### NetPubsubScores - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - { - "ID": 
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Score": { - "Score": 12.3, - "Topics": { - "/blocks": { - "TimeInMesh": 60000000000, - "FirstMessageDeliveries": 122, - "MeshMessageDeliveries": 1234, - "InvalidMessageDeliveries": 3 - } - }, - "AppSpecificScore": 12.3, - "IPColocationFactor": 12.3, - "BehaviourPenalty": 12.3 - } - } -] -``` - -### NetSetLimit - - -Perms: admin - -Inputs: -```json -[ - "string value", - { - "Memory": 123, - "Streams": 3, - "StreamsInbound": 1, - "StreamsOutbound": 2, - "Conns": 4, - "ConnsInbound": 3, - "ConnsOutbound": 4, - "FD": 5 - } -] -``` - -Response: `{}` - -### NetStat - - -Perms: read - -Inputs: -```json -[ - "string value" -] -``` - -Response: -```json -{ - "System": { - "NumStreamsInbound": 123, - "NumStreamsOutbound": 123, - "NumConnsInbound": 123, - "NumConnsOutbound": 123, - "NumFD": 123, - "Memory": 9 - }, - "Transient": { - "NumStreamsInbound": 123, - "NumStreamsOutbound": 123, - "NumConnsInbound": 123, - "NumConnsOutbound": 123, - "NumFD": 123, - "Memory": 9 - }, - "Services": { - "abc": { - "NumStreamsInbound": 1, - "NumStreamsOutbound": 2, - "NumConnsInbound": 3, - "NumConnsOutbound": 4, - "NumFD": 5, - "Memory": 123 - } - }, - "Protocols": { - "abc": { - "NumStreamsInbound": 1, - "NumStreamsOutbound": 2, - "NumConnsInbound": 3, - "NumConnsOutbound": 4, - "NumFD": 5, - "Memory": 123 - } - }, - "Peers": { - "abc": { - "NumStreamsInbound": 1, - "NumStreamsOutbound": 2, - "NumConnsInbound": 3, - "NumConnsOutbound": 4, - "NumFD": 5, - "Memory": 123 - } - } -} -``` - -## Pieces - - -### PiecesGetCIDInfo - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "CID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceBlockLocations": [ - { - "RelOffset": 42, - "BlockSize": 42, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - ] -} 
-``` - -### PiecesGetPieceInfo - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: +Response: ```json { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Deals": [ - { - "DealID": 5432, - "SectorID": 9, - "Offset": 1032, - "Length": 1032 - } - ] + "Cids": null, + "Blocks": null, + "Height": 0 } ``` -### PiecesListCidInfos - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -### PiecesListPieces - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - ## Pledge @@ -2795,8 +1139,7 @@ Response: [ "Mining", "Sealing", - "SectorStorage", - "Markets" + "SectorStorage" ] ``` diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 9a8c4821b4b..bd58748eecb 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -34,34 +34,6 @@ * [ChainSetHead](#ChainSetHead) * [ChainStatObj](#ChainStatObj) * [ChainTipSetWeight](#ChainTipSetWeight) -* [Client](#Client) - * [ClientCalcCommP](#ClientCalcCommP) - * [ClientCancelDataTransfer](#ClientCancelDataTransfer) - * [ClientCancelRetrievalDeal](#ClientCancelRetrievalDeal) - * [ClientDataTransferUpdates](#ClientDataTransferUpdates) - * [ClientDealPieceCID](#ClientDealPieceCID) - * [ClientDealSize](#ClientDealSize) - * [ClientFindData](#ClientFindData) - * [ClientGenCar](#ClientGenCar) - * [ClientGetDealInfo](#ClientGetDealInfo) - * [ClientGetDealStatus](#ClientGetDealStatus) - * [ClientGetDealUpdates](#ClientGetDealUpdates) - * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates) - * [ClientHasLocal](#ClientHasLocal) - * [ClientImport](#ClientImport) - * [ClientListDataTransfers](#ClientListDataTransfers) - * [ClientListDeals](#ClientListDeals) - * 
[ClientListImports](#ClientListImports) - * [ClientListRetrievals](#ClientListRetrievals) - * [ClientMinerQueryOffer](#ClientMinerQueryOffer) - * [ClientQueryAsk](#ClientQueryAsk) - * [ClientRemoveImport](#ClientRemoveImport) - * [ClientRestartDataTransfer](#ClientRestartDataTransfer) - * [ClientRetrieve](#ClientRetrieve) - * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) - * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) - * [ClientStartDeal](#ClientStartDeal) - * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) * [CreateBackup](#CreateBackup) * [Gas](#Gas) @@ -1091,1054 +1063,6 @@ Inputs: Response: `"0"` -## Client -The Client methods all have to do with interacting with the storage and -retrieval markets as a client - - -### ClientCalcCommP -ClientCalcCommP calculates the CommP for a specified file - - -Perms: write - -Inputs: -```json -[ - "string value" -] -``` - -Response: -```json -{ - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 1024 -} -``` - -### ClientCancelDataTransfer -ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer - - -Perms: write - -Inputs: -```json -[ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - true -] -``` - -Response: `{}` - -### ClientCancelRetrievalDeal -ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID - - -Perms: write - -Inputs: -```json -[ - 5 -] -``` - -Response: `{}` - -### ClientDataTransferUpdates - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - 
"Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } -} -``` - -### ClientDealPieceCID -ClientCalcCommP calculates the CommP and data size of the specified CID - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "PayloadSize": 9, - "PieceSize": 1032, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -} -``` - -### ClientDealSize -ClientDealSize calculates real deal data size - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "PayloadSize": 9, - "PieceSize": 1032 -} -``` - -### ClientFindData -ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -[ - { - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "MinPrice": "0", - "UnsealPrice": "0", - "PricePerByte": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - } -] -``` - -### ClientGenCar -ClientGenCar generates a CAR file for the specified file. 
- - -Perms: write - -Inputs: -```json -[ - { - "Path": "string value", - "IsCAR": true - }, - "string value" -] -``` - -Response: `{}` - -### ClientGetDealInfo -ClientGetDealInfo returns the latest information about a given deal. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": 
"0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -} -``` - -### ClientGetDealStatus -ClientGetDealStatus returns status given a code - - -Perms: read - -Inputs: -```json -[ - 42 -] -``` - -Response: `"string value"` - -### ClientGetDealUpdates -ClientGetDealUpdates returns the status of updated deals - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - 
"Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -} -``` - -### ClientGetRetrievalUpdates -ClientGetRetrievalUpdates returns status of updated retrieval deals - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "PayloadCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ID": 5, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PricePerByte": "0", - "UnsealPrice": "0", - "Status": 0, - "Message": "string value", - "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "BytesReceived": 42, - "BytesPaidFor": 42, - "TotalPaid": "0", - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Event": 5 -} -``` - -### ClientHasLocal -ClientHasLocal indicates whether a certain CID is locally stored. 
- - -Perms: write - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `true` - -### ClientImport -ClientImport imports file under the specified path into filestore. - - -Perms: admin - -Inputs: -```json -[ - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: -```json -{ - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ImportID": 50 -} -``` - -### ClientListDataTransfers -ClientListTransfers returns the status of all ongoing transfers of data - - -Perms: write - -Inputs: `null` - -Response: -```json -[ - { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -] -``` - -### ClientListDeals -ClientListDeals returns information about the deals made by the local client. 
- - -Perms: write - -Inputs: `null` - -Response: -```json -[ - { - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } - } -] -``` - -### ClientListImports -ClientListImports lists imported files and their root CIDs - - -Perms: write - -Inputs: `null` - -Response: 
-```json -[ - { - "Key": 50, - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Source": "string value", - "FilePath": "string value", - "CARPath": "string value" - } -] -``` - -### ClientListRetrievals -ClientQueryAsk returns a signed StorageAsk from the specified miner. -ClientListRetrievals returns information about retrievals made by the local client - - -Perms: write - -Inputs: `null` - -Response: -```json -[ - { - "PayloadCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ID": 5, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PricePerByte": "0", - "UnsealPrice": "0", - "Status": 0, - "Message": "string value", - "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "BytesReceived": 42, - "BytesPaidFor": 42, - "TotalPaid": "0", - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Event": 5 - } -] -``` - -### ClientMinerQueryOffer -ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. 
- - -Perms: read - -Inputs: -```json -[ - "f01234", - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "MinPrice": "0", - "UnsealPrice": "0", - "PricePerByte": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } -} -``` - -### ClientQueryAsk - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "f01234" -] -``` - -Response: -```json -{ - "Price": "0", - "VerifiedPrice": "0", - "MinPieceSize": 1032, - "MaxPieceSize": 1032, - "Miner": "f01234", - "Timestamp": 10101, - "Expiry": 10101, - "SeqNo": 42 -} -``` - -### ClientRemoveImport -ClientRemoveImport removes file import - - -Perms: admin - -Inputs: -```json -[ - 50 -] -``` - -Response: `{}` - -### ClientRestartDataTransfer -ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - - -Perms: write - -Inputs: -```json -[ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - true -] -``` - -Response: `{}` - -### ClientRetrieve -ClientRetrieve initiates the retrieval of a file, as specified in the order. 
- - -Perms: admin - -Inputs: -```json -[ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", - "Size": 42, - "FromLocalCAR": "string value", - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "f01234", - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - }, - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: `{}` - -### ClientRetrieveTryRestartInsufficientFunds -ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel -which are stuck due to insufficient funds - - -Perms: write - -Inputs: -```json -[ - "f01234" -] -``` - -Response: `{}` - -### ClientRetrieveWithEvents -ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel -of status updates. 
- - -Perms: admin - -Inputs: -```json -[ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", - "Size": 42, - "FromLocalCAR": "string value", - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "f01234", - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - }, - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: -```json -{ - "Event": 5, - "Status": 0, - "BytesReceived": 42, - "FundsSpent": "0", - "Err": "string value" -} -``` - -### ClientStartDeal -ClientStartDeal proposes a deal with a miner. - - -Perms: admin - -Inputs: -```json -[ - { - "Data": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "Wallet": "f01234", - "Miner": "f01234", - "EpochPrice": "0", - "MinBlocksDuration": 42, - "ProviderCollateral": "0", - "DealStartEpoch": 10101, - "FastRetrieval": true, - "VerifiedDeal": true - } -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### ClientStatelessDeal -ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. 
- - -Perms: write - -Inputs: -```json -[ - { - "Data": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "Wallet": "f01234", - "Miner": "f01234", - "EpochPrice": "0", - "MinBlocksDuration": 42, - "ProviderCollateral": "0", - "DealStartEpoch": 10101, - "FastRetrieval": true, - "VerifiedDeal": true - } -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - ## Create @@ -4697,9 +3621,6 @@ Inputs: `null` Response: `"0001-01-01T00:00:00Z"` ## State -The State methods are used to query, inspect, and interact with chain state. -Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset. -A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. ### StateAccountKey @@ -4734,7 +3655,7 @@ Perms: read Inputs: ```json [ - 22 + 23 ] ``` @@ -4749,7 +3670,7 @@ Perms: read Inputs: ```json [ - 22 + 23 ] ``` @@ -5561,7 +4482,8 @@ Response: "UpgradeThunderHeight": 10101, "UpgradeWatermelonHeight": 10101, "UpgradeDragonHeight": 10101, - "UpgradePhoenixHeight": 10101 + "UpgradePhoenixHeight": 10101, + "UpgradeAussieHeight": 10101 }, "Eip155ChainID": 123 } @@ -5840,6 +4762,7 @@ Response: "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -5918,6 +4841,7 @@ Response: "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -6485,7 +5409,7 @@ Inputs: ] ``` -Response: `22` +Response: `23` ### StateReadState StateReadState returns the indicated actor's state. 
diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 8dab4f35f3d..5951b134c91 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -37,35 +37,6 @@ * [ChainSetHead](#ChainSetHead) * [ChainStatObj](#ChainStatObj) * [ChainTipSetWeight](#ChainTipSetWeight) -* [Client](#Client) - * [ClientCalcCommP](#ClientCalcCommP) - * [ClientCancelDataTransfer](#ClientCancelDataTransfer) - * [ClientCancelRetrievalDeal](#ClientCancelRetrievalDeal) - * [ClientDataTransferUpdates](#ClientDataTransferUpdates) - * [ClientDealPieceCID](#ClientDealPieceCID) - * [ClientDealSize](#ClientDealSize) - * [ClientExport](#ClientExport) - * [ClientFindData](#ClientFindData) - * [ClientGenCar](#ClientGenCar) - * [ClientGetDealInfo](#ClientGetDealInfo) - * [ClientGetDealStatus](#ClientGetDealStatus) - * [ClientGetDealUpdates](#ClientGetDealUpdates) - * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates) - * [ClientHasLocal](#ClientHasLocal) - * [ClientImport](#ClientImport) - * [ClientListDataTransfers](#ClientListDataTransfers) - * [ClientListDeals](#ClientListDeals) - * [ClientListImports](#ClientListImports) - * [ClientListRetrievals](#ClientListRetrievals) - * [ClientMinerQueryOffer](#ClientMinerQueryOffer) - * [ClientQueryAsk](#ClientQueryAsk) - * [ClientRemoveImport](#ClientRemoveImport) - * [ClientRestartDataTransfer](#ClientRestartDataTransfer) - * [ClientRetrieve](#ClientRetrieve) - * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) - * [ClientRetrieveWait](#ClientRetrieveWait) - * [ClientStartDeal](#ClientStartDeal) - * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) * [CreateBackup](#CreateBackup) * [Eth](#Eth) @@ -106,6 +77,7 @@ * [EthSyncing](#EthSyncing) * [EthTraceBlock](#EthTraceBlock) * [EthTraceReplayBlockTransactions](#EthTraceReplayBlockTransactions) + * [EthTraceTransaction](#EthTraceTransaction) * 
[EthUninstallFilter](#EthUninstallFilter) * [EthUnsubscribe](#EthUnsubscribe) * [Filecoin](#Filecoin) @@ -1253,1054 +1225,6 @@ Inputs: Response: `"0"` -## Client -The Client methods all have to do with interacting with the storage and -retrieval markets as a client - - -### ClientCalcCommP -ClientCalcCommP calculates the CommP for a specified file - - -Perms: write - -Inputs: -```json -[ - "string value" -] -``` - -Response: -```json -{ - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 1024 -} -``` - -### ClientCancelDataTransfer -ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer - - -Perms: write - -Inputs: -```json -[ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - true -] -``` - -Response: `{}` - -### ClientCancelRetrievalDeal -ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID - - -Perms: write - -Inputs: -```json -[ - 5 -] -``` - -Response: `{}` - -### ClientDataTransferUpdates - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } -} -``` - -### ClientDealPieceCID -ClientCalcCommP calculates the CommP and data size of the specified CID - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "PayloadSize": 9, - "PieceSize": 1032, - "PieceCID": { 
- "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -} -``` - -### ClientDealSize -ClientDealSize calculates real deal data size - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "PayloadSize": 9, - "PieceSize": 1032 -} -``` - -### ClientExport -ClientExport exports a file stored in the local filestore to a system file - - -Perms: admin - -Inputs: -```json -[ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DAGs": [ - { - "DataSelector": "Links/21/Hash/Links/42/Hash", - "ExportMerkleProof": true - } - ], - "FromLocalCAR": "string value", - "DealID": 5 - }, - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: `{}` - -### ClientFindData -ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -[ - { - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "MinPrice": "0", - "UnsealPrice": "0", - "PricePerByte": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } - } -] -``` - -### ClientGenCar -ClientGenCar generates a CAR file for the specified file. 
- - -Perms: write - -Inputs: -```json -[ - { - "Path": "string value", - "IsCAR": true - }, - "string value" -] -``` - -Response: `{}` - -### ClientGetDealInfo -ClientGetDealInfo returns the latest information about a given deal. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": 
"0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -} -``` - -### ClientGetDealStatus -ClientGetDealStatus returns status given a code - - -Perms: read - -Inputs: -```json -[ - 42 -] -``` - -Response: `"string value"` - -### ClientGetDealUpdates -ClientGetDealUpdates returns the status of updated deals - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - 
"Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -} -``` - -### ClientGetRetrievalUpdates -ClientGetRetrievalUpdates returns status of updated retrieval deals - - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "PayloadCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ID": 5, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PricePerByte": "0", - "UnsealPrice": "0", - "Status": 0, - "Message": "string value", - "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "BytesReceived": 42, - "BytesPaidFor": 42, - "TotalPaid": "0", - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Event": 5 -} -``` - -### ClientHasLocal -ClientHasLocal indicates whether a certain CID is locally stored. 
- - -Perms: write - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `true` - -### ClientImport -ClientImport imports file under the specified path into filestore. - - -Perms: admin - -Inputs: -```json -[ - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: -```json -{ - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ImportID": 50 -} -``` - -### ClientListDataTransfers -ClientListTransfers returns the status of all ongoing transfers of data - - -Perms: write - -Inputs: `null` - -Response: -```json -[ - { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } -] -``` - -### ClientListDeals -ClientListDeals returns information about the deals made by the local client. 
- - -Perms: write - -Inputs: `null` - -Response: -```json -[ - { - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "DealStages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "ExpectedDuration": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - }, - "Provider": "f01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true, - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - } - } -] -``` - -### ClientListImports -ClientListImports lists imported files and their root CIDs - - -Perms: write - -Inputs: `null` - -Response: 
-```json -[ - { - "Key": 50, - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Source": "string value", - "FilePath": "string value", - "CARPath": "string value" - } -] -``` - -### ClientListRetrievals -ClientListRetrievals returns information about retrievals made by the local client - - -Perms: write - -Inputs: `null` - -Response: -```json -[ - { - "PayloadCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ID": 5, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PricePerByte": "0", - "UnsealPrice": "0", - "Status": 0, - "Message": "string value", - "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "BytesReceived": 42, - "BytesPaidFor": 42, - "TotalPaid": "0", - "TransferChannelID": { - "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "ID": 3 - }, - "DataTransfer": { - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42, - "Stages": { - "Stages": [ - { - "Name": "string value", - "Description": "string value", - "CreatedTime": "0001-01-01T00:00:00Z", - "UpdatedTime": "0001-01-01T00:00:00Z", - "Logs": [ - { - "Log": "string value", - "UpdatedTime": "0001-01-01T00:00:00Z" - } - ] - } - ] - } - }, - "Event": 5 - } -] -``` - -### ClientMinerQueryOffer -ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. 
- - -Perms: read - -Inputs: -```json -[ - "f01234", - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "MinPrice": "0", - "UnsealPrice": "0", - "PricePerByte": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - } -} -``` - -### ClientQueryAsk -ClientQueryAsk returns a signed StorageAsk from the specified miner. - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "f01234" -] -``` - -Response: -```json -{ - "Response": { - "Price": "0", - "VerifiedPrice": "0", - "MinPieceSize": 1032, - "MaxPieceSize": 1032, - "Miner": "f01234", - "Timestamp": 10101, - "Expiry": 10101, - "SeqNo": 42 - }, - "DealProtocols": [ - "string value" - ] -} -``` - -### ClientRemoveImport -ClientRemoveImport removes file import - - -Perms: admin - -Inputs: -```json -[ - 50 -] -``` - -Response: `{}` - -### ClientRestartDataTransfer -ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - - -Perms: write - -Inputs: -```json -[ - 3, - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - true -] -``` - -Response: `{}` - -### ClientRetrieve -ClientRetrieve initiates the retrieval of a file, as specified in the order. 
- - -Perms: admin - -Inputs: -```json -[ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DataSelector": "Links/21/Hash/Links/42/Hash", - "Size": 42, - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "f01234", - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } - }, - "RemoteStore": "00000000-0000-0000-0000-000000000000" - } -] -``` - -Response: -```json -{ - "DealID": 5 -} -``` - -### ClientRetrieveTryRestartInsufficientFunds -ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel -which are stuck due to insufficient funds - - -Perms: write - -Inputs: -```json -[ - "f01234" -] -``` - -Response: `{}` - -### ClientRetrieveWait -ClientRetrieveWait waits for retrieval to be complete - - -Perms: admin - -Inputs: -```json -[ - 5 -] -``` - -Response: `{}` - -### ClientStartDeal -ClientStartDeal proposes a deal with a miner. 
- - -Perms: admin - -Inputs: -```json -[ - { - "Data": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "Wallet": "f01234", - "Miner": "f01234", - "EpochPrice": "0", - "MinBlocksDuration": 42, - "ProviderCollateral": "0", - "DealStartEpoch": 10101, - "FastRetrieval": true, - "VerifiedDeal": true - } -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### ClientStatelessDeal -ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. - - -Perms: write - -Inputs: -```json -[ - { - "Data": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1024, - "RawBlockSize": 42 - }, - "Wallet": "f01234", - "Miner": "f01234", - "EpochPrice": "0", - "MinBlocksDuration": 42, - "ProviderCollateral": "0", - "DealStartEpoch": 10101, - "FastRetrieval": true, - "VerifiedDeal": true - } -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - ## Create @@ -2728,6 +1652,7 @@ Response: "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -2767,6 +1692,7 @@ Response: "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -2805,6 +1731,7 @@ Response: "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ 
"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -2844,6 +1771,7 @@ Response: "gas": "0x5", "maxFeePerGas": "0x0", "maxPriorityFeePerGas": "0x0", + "gasPrice": "0x0", "accessList": [ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e" ], @@ -3171,6 +2099,39 @@ Response: ] ``` +### EthTraceTransaction +Implmements OpenEthereum-compatible API method trace_transaction + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +[ + { + "type": "string value", + "error": "string value", + "subtraces": 123, + "traceAddress": [ + 123 + ], + "action": {}, + "result": {}, + "blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", + "blockNumber": 9, + "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", + "transactionPosition": 123 + } +] +``` + ### EthUninstallFilter Uninstalls a filter with given id. @@ -6298,7 +5259,7 @@ Perms: read Inputs: ```json [ - 22 + 23 ] ``` @@ -6313,7 +5274,7 @@ Perms: read Inputs: ```json [ - 22 + 23 ] ``` @@ -7223,7 +6184,8 @@ Response: "UpgradeThunderHeight": 10101, "UpgradeWatermelonHeight": 10101, "UpgradeDragonHeight": 10101, - "UpgradePhoenixHeight": 10101 + "UpgradePhoenixHeight": 10101, + "UpgradeAussieHeight": 10101 }, "Eip155ChainID": 123 } @@ -7528,6 +6490,7 @@ Response: "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -7606,6 +6569,7 @@ Response: "ClientCollateral": "0" }, "State": { + "SectorNumber": 9, "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, "SlashEpoch": 10101 @@ -8201,7 +7165,7 @@ Inputs: ] ``` -Response: `22` +Response: `23` ### StateReadState StateReadState returns the indicated actor's state. 
diff --git a/documentation/en/cli-curio.md b/documentation/en/cli-curio.md deleted file mode 100644 index 9f578154c7a..00000000000 --- a/documentation/en/cli-curio.md +++ /dev/null @@ -1,563 +0,0 @@ -# curio -``` -NAME: - curio - Filecoin decentralized storage network provider - -USAGE: - curio [global options] command [command options] [arguments...] - -VERSION: - 1.27.0 - -COMMANDS: - cli Execute cli commands - run Start a Curio process - stop Stop a running Curio process - config Manage node config by layers. The layer 'base' will always be applied at Curio start-up. - test Utility functions for testing - web Start Curio web interface - guided-setup Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner - seal Manage the sealing pipeline - market - fetch-params Fetch proving parameters - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS: - --color use color in display output (default: depends on output being a TTY) - --db-host value Command separated list of hostnames for yugabyte cluster (default: "127.0.0.1") [$CURIO_DB_HOST, $CURIO_HARMONYDB_HOSTS] - --db-name value (default: "yugabyte") [$CURIO_DB_NAME, $CURIO_HARMONYDB_NAME] - --db-user value (default: "yugabyte") [$CURIO_DB_USER, $CURIO_HARMONYDB_USERNAME] - --db-password value (default: "yugabyte") [$CURIO_DB_PASSWORD, $CURIO_HARMONYDB_PASSWORD] - --db-port value (default: "5433") [$CURIO_DB_PORT, $CURIO_HARMONYDB_PORT] - --repo-path value (default: "~/.curio") [$CURIO_REPO_PATH] - --vv enables very verbose mode, useful for debugging the CLI (default: false) - --help, -h show help - --version, -v print the version -``` - -## curio cli -``` -NAME: - curio cli - Execute cli commands - -USAGE: - curio cli command [command options] [arguments...] 
- -COMMANDS: - storage manage sector storage - log Manage logging - wait-api Wait for Curio api to come online - help, h Shows a list of commands or help for one command - -OPTIONS: - --machine value machine host:port (curio run --listen address) - --help, -h show help -``` - -### curio cli storage -``` -NAME: - curio cli storage - manage sector storage - -USAGE: - curio cli storage command [command options] [arguments...] - -DESCRIPTION: - Sectors can be stored across many filesystem paths. These - commands provide ways to manage the storage the miner will used to store sectors - long term for proving (references as 'store') as well as how sectors will be - stored while moving through the sealing pipeline (references as 'seal'). - -COMMANDS: - attach attach local storage path - detach detach local storage path - list list local storage paths - find find sector in the storage system - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### curio cli storage attach -``` -NAME: - curio cli storage attach - attach local storage path - -USAGE: - curio cli storage attach [command options] [path] - -DESCRIPTION: - Storage can be attached to the miner using this command. The storage volume - list is stored local to the miner in storage.json set in curio run. We do not - recommend manually modifying this value without further understanding of the - storage system. - - Each storage volume contains a configuration file which describes the - capabilities of the volume. When the '--init' flag is provided, this file will - be created using the additional flags. 
- - Weight - A high weight value means data will be more likely to be stored in this path - - Seal - Data for the sealing process will be stored here - - Store - Finalized sectors that will be moved here for long term storage and be proven - over time - - -OPTIONS: - --init initialize the path first (default: false) - --weight value (for init) path weight (default: 10) - --seal (for init) use path for sealing (default: false) - --store (for init) use path for long-term storage (default: false) - --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) - --groups value [ --groups value ] path group names - --allow-to value [ --allow-to value ] path groups allowed to pull data from this path (allow all if not specified) - --help, -h show help -``` - -#### curio cli storage detach -``` -NAME: - curio cli storage detach - detach local storage path - -USAGE: - curio cli storage detach [command options] [path] - -OPTIONS: - --really-do-it (default: false) - --help, -h show help -``` - -#### curio cli storage list -``` -NAME: - curio cli storage list - list local storage paths - -USAGE: - curio cli storage list [command options] [arguments...] - -OPTIONS: - --local only list local storage paths (default: false) - --help, -h show help -``` - -#### curio cli storage find -``` -NAME: - curio cli storage find - find sector in the storage system - -USAGE: - curio cli storage find [command options] [miner address] [sector number] - -OPTIONS: - --help, -h show help -``` - -### curio cli log -``` -NAME: - curio cli log - Manage logging - -USAGE: - curio cli log command [command options] [arguments...] - -COMMANDS: - list List log systems - set-level Set log level - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### curio cli log list -``` -NAME: - curio cli log list - List log systems - -USAGE: - curio cli log list [command options] [arguments...] 
- -OPTIONS: - --help, -h show help -``` - -#### curio cli log set-level -``` -NAME: - curio cli log set-level - Set log level - -USAGE: - curio cli log set-level [command options] [level] - -DESCRIPTION: - Set the log level for logging systems: - - The system flag can be specified multiple times. - - eg) log set-level --system chain --system chainxchg debug - - Available Levels: - debug - info - warn - error - - Environment Variables: - GOLOG_LOG_LEVEL - Default log level for all log systems - GOLOG_LOG_FMT - Change output log format (json, nocolor) - GOLOG_FILE - Write logs to file - GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr - - -OPTIONS: - --system value [ --system value ] limit to log system - --help, -h show help -``` - -### curio cli wait-api -``` -NAME: - curio cli wait-api - Wait for Curio api to come online - -USAGE: - curio cli wait-api [command options] [arguments...] - -OPTIONS: - --timeout value duration to wait till fail (default: 30s) - --help, -h show help -``` - -## curio run -``` -NAME: - curio run - Start a Curio process - -USAGE: - curio run [command options] [arguments...] - -OPTIONS: - --listen value host address and port the worker api will listen on (default: "0.0.0.0:12300") [$LOTUS_WORKER_LISTEN] - --nosync don't check full-node sync status (default: false) - --manage-fdlimit manage open file limit (default: true) - --storage-json value path to json file containing storage config (default: "~/.curio/storage.json") - --journal value path to journal files (default: "~/.curio/") - --layers value, -l value, --layer value [ --layers value, -l value, --layer value ] list of layers to be interpreted (atop defaults). Default: base - --help, -h show help -``` - -## curio stop -``` -NAME: - curio stop - Stop a running Curio process - -USAGE: - curio stop [command options] [arguments...] 
- -OPTIONS: - --help, -h show help -``` - -## curio config -``` -NAME: - curio config - Manage node config by layers. The layer 'base' will always be applied at Curio start-up. - -USAGE: - curio config command [command options] [arguments...] - -COMMANDS: - default, defaults Print default node config - set, add, update, create Set a config layer or the base by providing a filename or stdin. - get, cat, show Get a config layer by name. You may want to pipe the output to a file, or use 'less' - list, ls List config layers present in the DB. - interpret, view, stacked, stack Interpret stacked config layers by this version of curio, with system-generated comments. - remove, rm, del, delete Remove a named config layer. - edit edit a config layer - new-cluster Create new configuration for a new cluster - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio config default -``` -NAME: - curio config default - Print default node config - -USAGE: - curio config default [command options] [arguments...] - -OPTIONS: - --no-comment don't comment default values (default: false) - --help, -h show help -``` - -### curio config set -``` -NAME: - curio config set - Set a config layer or the base by providing a filename or stdin. - -USAGE: - curio config set [command options] a layer's file name - -OPTIONS: - --title value title of the config layer (req'd for stdin) - --help, -h show help -``` - -### curio config get -``` -NAME: - curio config get - Get a config layer by name. You may want to pipe the output to a file, or use 'less' - -USAGE: - curio config get [command options] layer name - -OPTIONS: - --help, -h show help -``` - -### curio config list -``` -NAME: - curio config list - List config layers present in the DB. - -USAGE: - curio config list [command options] [arguments...] 
- -OPTIONS: - --help, -h show help -``` - -### curio config interpret -``` -NAME: - curio config interpret - Interpret stacked config layers by this version of curio, with system-generated comments. - -USAGE: - curio config interpret [command options] a list of layers to be interpreted as the final config - -OPTIONS: - --layers value [ --layers value ] comma or space separated list of layers to be interpreted (base is always applied) - --help, -h show help -``` - -### curio config remove -``` -NAME: - curio config remove - Remove a named config layer. - -USAGE: - curio config remove [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### curio config edit -``` -NAME: - curio config edit - edit a config layer - -USAGE: - curio config edit [command options] [layer name] - -OPTIONS: - --editor value editor to use (default: "vim") [$EDITOR] - --source value source config layer (default: ) - --allow-overwrite allow overwrite of existing layer if source is a different layer (default: false) - --no-source-diff save the whole config into the layer, not just the diff (default: false) - --no-interpret-source do not interpret source layer (default: true if --source is set) - --help, -h show help -``` - -### curio config new-cluster -``` -NAME: - curio config new-cluster - Create new configuration for a new cluster - -USAGE: - curio config new-cluster [command options] [SP actor address...] - -OPTIONS: - --help, -h show help -``` - -## curio test -``` -NAME: - curio test - Utility functions for testing - -USAGE: - curio test command [command options] [arguments...] - -COMMANDS: - window-post, wd, windowpost, wdpost Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. 
- help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio test window-post -``` -NAME: - curio test window-post - Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. - -USAGE: - curio test window-post command [command options] [arguments...] - -COMMANDS: - here, cli Compute WindowPoSt for performance and configuration testing. - task, scheduled, schedule, async, asynchronous Test the windowpost scheduler by running it on the next available curio. - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### curio test window-post here -``` -NAME: - curio test window-post here - Compute WindowPoSt for performance and configuration testing. - -USAGE: - curio test window-post here [command options] [deadline index] - -DESCRIPTION: - Note: This command is intended to be used to verify PoSt compute performance. - It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain. - -OPTIONS: - --deadline value deadline to compute WindowPoSt for (default: 0) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --storage-json value path to json file containing storage config (default: "~/.curio/storage.json") - --partition value partition to compute WindowPoSt for (default: 0) - --help, -h show help -``` - -#### curio test window-post task -``` -NAME: - curio test window-post task - Test the windowpost scheduler by running it on the next available curio. - -USAGE: - curio test window-post task [command options] [arguments...] - -OPTIONS: - --deadline value deadline to compute WindowPoSt for (default: 0) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). 
Default: base - --help, -h show help -``` - -## curio web -``` -NAME: - curio web - Start Curio web interface - -USAGE: - curio web [command options] [arguments...] - -DESCRIPTION: - Start an instance of Curio web interface. - This creates the 'web' layer if it does not exist, then calls run with that layer. - -OPTIONS: - --listen value Address to listen on (default: "127.0.0.1:4701") - --nosync don't check full-node sync status (default: false) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --help, -h show help -``` - -## curio guided-setup -``` -NAME: - curio guided-setup - Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner - -USAGE: - curio guided-setup [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -## curio seal -``` -NAME: - curio seal - Manage the sealing pipeline - -USAGE: - curio seal command [command options] [arguments...] - -COMMANDS: - start Start new sealing operations manually - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio seal start -``` -NAME: - curio seal start - Start new sealing operations manually - -USAGE: - curio seal start [command options] [arguments...] - -OPTIONS: - --actor value Specify actor address to start sealing sectors for - --now Start sealing sectors for all actors now (not on schedule) (default: false) - --cc Start sealing new CC sectors (default: false) - --count value Number of sectors to start (default: 1) - --synthetic Use synthetic PoRep (default: false) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --help, -h show help -``` - -## curio market -``` -NAME: - curio market - -USAGE: - curio market command [command options] [arguments...] 
- -COMMANDS: - rpc-info - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio market rpc-info -``` -NAME: - curio market rpc-info - -USAGE: - curio market rpc-info [command options] [arguments...] - -OPTIONS: - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --help, -h show help -``` - -## curio fetch-params -``` -NAME: - curio fetch-params - Fetch proving parameters - -USAGE: - curio fetch-params [command options] [sectorSize] - -OPTIONS: - --help, -h show help -``` diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index b54508224c6..dbf8853ba33 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.27.0 + 1.27.1 COMMANDS: init Initialize a lotus miner repo @@ -49,7 +49,6 @@ USAGE: COMMANDS: restore Initialize a lotus miner repo from a backup - service Initialize a lotus miner sub-service help, h Shows a list of commands or help for one command OPTIONS: @@ -84,23 +83,6 @@ OPTIONS: --help, -h show help ``` -### lotus-miner init service -``` -NAME: - lotus-miner init service - Initialize a lotus miner sub-service - -USAGE: - lotus-miner init service [command options] [backupFile] - -OPTIONS: - --config value config file (config.toml) - --nosync don't check full-node sync status (default: false) - --type value [ --type value ] type of service to be enabled - --api-sealer value sealer API info (lotus-miner auth api-info --perm=admin) - --api-sector-index value sector Index API info (lotus-miner auth api-info --perm=admin) - --help, -h show help -``` - ## lotus-miner run ``` NAME: diff --git a/documentation/en/cli-lotus-provider.md b/documentation/en/cli-lotus-provider.md deleted file mode 100644 index 0fae817d2dd..00000000000 --- a/documentation/en/cli-lotus-provider.md +++ /dev/null 
@@ -1,410 +0,0 @@ -# lotus-provider -``` -NAME: - lotus-provider - Filecoin decentralized storage network provider - -USAGE: - lotus-provider [global options] command [command options] [arguments...] - -VERSION: - 1.26.3 - -COMMANDS: - run Start a lotus provider process - stop Stop a running lotus provider - config Manage node config by layers. The layer 'base' will always be applied. - test Utility functions for testing - version Print version - help, h Shows a list of commands or help for one command - DEVELOPER: - auth Manage RPC permissions - log Manage logging - wait-api Wait for lotus api to come online - fetch-params Fetch proving parameters - -GLOBAL OPTIONS: - --color use color in display output (default: depends on output being a TTY) - --db-host value Command separated list of hostnames for yugabyte cluster (default: "yugabyte") [$LOTUS_DB_HOST] - --db-name value (default: "yugabyte") [$LOTUS_DB_NAME, $LOTUS_HARMONYDB_HOSTS] - --db-user value (default: "yugabyte") [$LOTUS_DB_USER, $LOTUS_HARMONYDB_USERNAME] - --db-password value (default: "yugabyte") [$LOTUS_DB_PASSWORD, $LOTUS_HARMONYDB_PASSWORD] - --layers value (default: "base") [$LOTUS_LAYERS, $LOTUS_CONFIG_LAYERS] - --repo-path value (default: "~/.lotusprovider") [$LOTUS_REPO_PATH] - --vv enables very verbose mode, useful for debugging the CLI (default: false) - --help, -h show help - --version, -v print the version -``` - -## lotus-provider run -``` -NAME: - lotus-provider run - Start a lotus provider process - -USAGE: - lotus-provider run [command options] [arguments...] - -OPTIONS: - --listen value host address and port the worker api will listen on (default: "0.0.0.0:12300") [$LOTUS_WORKER_LISTEN] - --nosync don't check full-node sync status (default: false) - --manage-fdlimit manage open file limit (default: true) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). 
Default: base (default: "base") - --storage-json value path to json file containing storage config (default: "~/.lotus-provider/storage.json") - --journal value path to journal files (default: "~/.lotus-provider/") - --help, -h show help -``` - -## lotus-provider stop -``` -NAME: - lotus-provider stop - Stop a running lotus provider - -USAGE: - lotus-provider stop [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -## lotus-provider config -``` -NAME: - lotus-provider config - Manage node config by layers. The layer 'base' will always be applied. - -USAGE: - lotus-provider config command [command options] [arguments...] - -COMMANDS: - default, defaults Print default node config - set, add, update, create Set a config layer or the base by providing a filename or stdin. - get, cat, show Get a config layer by name. You may want to pipe the output to a file, or use 'less' - list, ls List config layers you can get. - interpret, view, stacked, stack Interpret stacked config layers by this version of lotus-provider, with system-generated comments. - remove, rm, del, delete Remove a named config layer. - from-miner Express a database config (for lotus-provider) from an existing miner. - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### lotus-provider config default -``` -NAME: - lotus-provider config default - Print default node config - -USAGE: - lotus-provider config default [command options] [arguments...] - -OPTIONS: - --no-comment don't comment default values (default: false) - --help, -h show help -``` - -### lotus-provider config set -``` -NAME: - lotus-provider config set - Set a config layer or the base by providing a filename or stdin. 
- -USAGE: - lotus-provider config set [command options] a layer's file name - -OPTIONS: - --title value title of the config layer (req'd for stdin) - --help, -h show help -``` - -### lotus-provider config get -``` -NAME: - lotus-provider config get - Get a config layer by name. You may want to pipe the output to a file, or use 'less' - -USAGE: - lotus-provider config get [command options] layer name - -OPTIONS: - --help, -h show help -``` - -### lotus-provider config list -``` -NAME: - lotus-provider config list - List config layers you can get. - -USAGE: - lotus-provider config list [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### lotus-provider config interpret -``` -NAME: - lotus-provider config interpret - Interpret stacked config layers by this version of lotus-provider, with system-generated comments. - -USAGE: - lotus-provider config interpret [command options] a list of layers to be interpreted as the final config - -OPTIONS: - --layers value [ --layers value ] comma or space separated list of layers to be interpreted (default: "base") - --help, -h show help -``` - -### lotus-provider config remove -``` -NAME: - lotus-provider config remove - Remove a named config layer. - -USAGE: - lotus-provider config remove [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### lotus-provider config from-miner -``` -NAME: - lotus-provider config from-miner - Express a database config (for lotus-provider) from an existing miner. - -USAGE: - lotus-provider config from-miner [command options] [arguments...] - -DESCRIPTION: - Express a database config (for lotus-provider) from an existing miner. - -OPTIONS: - --miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] - --to-layer value, -t value The layer name for this data push. 
'base' is recommended for single-miner setup. - --overwrite, -o Use this with --to-layer to replace an existing layer (default: false) - --help, -h show help -``` - -## lotus-provider test -``` -NAME: - lotus-provider test - Utility functions for testing - -USAGE: - lotus-provider test command [command options] [arguments...] - -COMMANDS: - window-post, wd, windowpost, wdpost Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### lotus-provider test window-post -``` -NAME: - lotus-provider test window-post - Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. - -USAGE: - lotus-provider test window-post command [command options] [arguments...] - -COMMANDS: - here, cli Compute WindowPoSt for performance and configuration testing. - task, scheduled, schedule, async, asynchronous Test the windowpost scheduler by running it on the next available lotus-provider. - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### lotus-provider test window-post here -``` -NAME: - lotus-provider test window-post here - Compute WindowPoSt for performance and configuration testing. - -USAGE: - lotus-provider test window-post here [command options] [deadline index] - -DESCRIPTION: - Note: This command is intended to be used to verify PoSt compute performance. - It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain. - -OPTIONS: - --deadline value deadline to compute WindowPoSt for (default: 0) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). 
Default: base (default: "base") - --storage-json value path to json file containing storage config (default: "~/.lotus-provider/storage.json") - --partition value partition to compute WindowPoSt for (default: 0) - --help, -h show help -``` - -#### lotus-provider test window-post task -``` -NAME: - lotus-provider test window-post task - Test the windowpost scheduler by running it on the next available lotus-provider. - -USAGE: - lotus-provider test window-post task [command options] [arguments...] - -OPTIONS: - --deadline value deadline to compute WindowPoSt for (default: 0) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base (default: "base") - --help, -h show help -``` - -## lotus-provider version -``` -NAME: - lotus-provider version - Print version - -USAGE: - lotus-provider version [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -## lotus-provider auth -``` -NAME: - lotus-provider auth - Manage RPC permissions - -USAGE: - lotus-provider auth command [command options] [arguments...] - -COMMANDS: - create-token Create token - api-info Get token with API info required to connect to this node - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### lotus-provider auth create-token -``` -NAME: - lotus-provider auth create-token - Create token - -USAGE: - lotus-provider auth create-token [command options] [arguments...] - -OPTIONS: - --perm value permission to assign to the token, one of: read, write, sign, admin - --help, -h show help -``` - -### lotus-provider auth api-info -``` -NAME: - lotus-provider auth api-info - Get token with API info required to connect to this node - -USAGE: - lotus-provider auth api-info [command options] [arguments...] 
- -OPTIONS: - --perm value permission to assign to the token, one of: read, write, sign, admin - --help, -h show help -``` - -## lotus-provider log -``` -NAME: - lotus-provider log - Manage logging - -USAGE: - lotus-provider log command [command options] [arguments...] - -COMMANDS: - list List log systems - set-level Set log level - alerts Get alert states - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### lotus-provider log list -``` -NAME: - lotus-provider log list - List log systems - -USAGE: - lotus-provider log list [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### lotus-provider log set-level -``` -NAME: - lotus-provider log set-level - Set log level - -USAGE: - lotus-provider log set-level [command options] [level] - -DESCRIPTION: - Set the log level for logging systems: - - The system flag can be specified multiple times. - - eg) log set-level --system chain --system chainxchg debug - - Available Levels: - debug - info - warn - error - - Environment Variables: - GOLOG_LOG_LEVEL - Default log level for all log systems - GOLOG_LOG_FMT - Change output log format (json, nocolor) - GOLOG_FILE - Write logs to file - GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr - - -OPTIONS: - --system value [ --system value ] limit to log system - --help, -h show help -``` - -### lotus-provider log alerts -``` -NAME: - lotus-provider log alerts - Get alert states - -USAGE: - lotus-provider log alerts [command options] [arguments...] - -OPTIONS: - --all get all (active and inactive) alerts (default: false) - --help, -h show help -``` - -## lotus-provider wait-api -``` -NAME: - lotus-provider wait-api - Wait for lotus api to come online - -USAGE: - lotus-provider wait-api [command options] [arguments...] 
- -CATEGORY: - DEVELOPER - -OPTIONS: - --timeout value duration to wait till fail (default: 30s) - --help, -h show help -``` - -## lotus-provider fetch-params -``` -NAME: - lotus-provider fetch-params - Fetch proving parameters - -USAGE: - lotus-provider fetch-params [command options] [sectorSize] - -CATEGORY: - DEVELOPER - -OPTIONS: - --help, -h show help -``` diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index 441ba14ab88..d5477b97796 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.27.0 + 1.27.1 COMMANDS: run Start lotus worker diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index a501dd732c9..f7e504f00c0 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.27.0 + 1.27.1 COMMANDS: daemon Start a lotus daemon process @@ -19,7 +19,6 @@ COMMANDS: send Send funds between accounts wallet Manage wallet info Print node info - client Make deals, store data, retrieve data msig Interact with a multisig wallet filplus Interact with the verified registry actor used by Filplus paych Manage payment channels @@ -403,515 +402,6 @@ OPTIONS: --help, -h show help ``` -## lotus client -``` -NAME: - lotus client - Make deals, store data, retrieve data - -USAGE: - lotus client command [command options] [arguments...] 
- -COMMANDS: - help, h Shows a list of commands or help for one command - DATA: - import Import data - drop Remove import - local List locally imported data - stat Print information about a locally stored file (piece size, etc) - RETRIEVAL: - find Find data in the network - retrieval-ask Get a miner's retrieval ask - retrieve Retrieve data from network - cat Show data from network - ls List object links - cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer - list-retrievals List retrieval market deals - STORAGE: - deal Initialize storage deal with a miner - query-ask Find a miners ask - list-deals List storage market deals - get-deal Print detailed deal information - list-asks List asks for top miners - deal-stats Print statistics about local storage deals - inspect-deal Inspect detailed information about deal's lifecycle and the various stages it goes through - UTIL: - commP Calculate the piece-cid (commP) of a CAR file - generate-car Generate a car file from input - balances Print storage market client balances - list-transfers List ongoing data transfers for deals - restart-transfer Force restart a stalled data transfer - cancel-transfer Force cancel a data transfer - -OPTIONS: - --help, -h show help -``` - -### lotus client import -``` -NAME: - lotus client import - Import data - -USAGE: - lotus client import [command options] [inputPath] - -CATEGORY: - DATA - -OPTIONS: - --car import from a car file instead of a regular file (default: false) - --quiet, -q Output root CID only (default: false) - --help, -h show help -``` - -### lotus client drop -``` -NAME: - lotus client drop - Remove import - -USAGE: - lotus client drop [command options] [import ID...] - -CATEGORY: - DATA - -OPTIONS: - --help, -h show help -``` - -### lotus client local -``` -NAME: - lotus client local - List locally imported data - -USAGE: - lotus client local [command options] [arguments...] 
- -CATEGORY: - DATA - -OPTIONS: - --help, -h show help -``` - -### lotus client stat -``` -NAME: - lotus client stat - Print information about a locally stored file (piece size, etc) - -USAGE: - lotus client stat [command options] - -CATEGORY: - DATA - -OPTIONS: - --help, -h show help -``` - -### lotus client find -``` -NAME: - lotus client find - Find data in the network - -USAGE: - lotus client find [command options] [dataCid] - -CATEGORY: - RETRIEVAL - -OPTIONS: - --pieceCid value require data to be retrieved from a specific Piece CID - --help, -h show help -``` - -### lotus client retrieval-ask -``` -NAME: - lotus client retrieval-ask - Get a miner's retrieval ask - -USAGE: - lotus client retrieval-ask [command options] [minerAddress] [data CID] - -CATEGORY: - RETRIEVAL - -OPTIONS: - --size value data size in bytes (default: 0) - --help, -h show help -``` - -### lotus client retrieve -``` -NAME: - lotus client retrieve - Retrieve data from network - -USAGE: - lotus client retrieve [command options] [dataCid outputPath] - -CATEGORY: - RETRIEVAL - -DESCRIPTION: - Retrieve data from the Filecoin network. - - The retrieve command will attempt to find a provider make a retrieval deal with - them. In case a provider can't be found, it can be specified with the --provider - flag. - - By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively - a CAR file containing the raw IPLD graph can be exported by setting the --car - flag. - - Partial Retrieval: - - The --data-selector flag can be used to specify a sub-graph to fetch. The - selector can be specified as either IPLD datamodel text-path selector, or IPLD - json selector. - - In case of unixfs retrieval, the selector must point at a single root node, and - match the entire graph under that node. - - In case of CAR retrieval, the selector must have one common "sub-root" node. - - Examples: - - - Retrieve a file by CID - $ lotus client retrieve Qm... 
my-file.txt - - - Retrieve a file by CID from f0123 - $ lotus client retrieve --provider f0123 Qm... my-file.txt - - - Retrieve a first file from a specified directory - $ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt - - -OPTIONS: - --car Export to a car file instead of a regular file (default: false) - --data-selector value, --datamodel-path-selector value IPLD datamodel text-path selector, or IPLD json selector - --car-export-merkle-proof (requires --data-selector and --car) Export data-selector merkle proof (default: false) - --from value address to send transactions from - --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery - --maxPrice value maximum price the client is willing to consider (default: 0 FIL) - --pieceCid value require data to be retrieved from a specific Piece CID - --allow-local (default: false) - --help, -h show help -``` - -### lotus client cat -``` -NAME: - lotus client cat - Show data from network - -USAGE: - lotus client cat [command options] [dataCid] - -CATEGORY: - RETRIEVAL - -OPTIONS: - --ipld list IPLD datamodel links (default: false) - --data-selector value IPLD datamodel text-path selector, or IPLD json selector - --from value address to send transactions from - --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery - --maxPrice value maximum price the client is willing to consider (default: 0 FIL) - --pieceCid value require data to be retrieved from a specific Piece CID - --allow-local (default: false) - --help, -h show help -``` - -### lotus client ls -``` -NAME: - lotus client ls - List object links - -USAGE: - lotus client ls [command options] [dataCid] - -CATEGORY: - RETRIEVAL - -OPTIONS: - --ipld list IPLD datamodel links (default: false) - --depth value list links recursively up to the specified depth (default: 1) - --data-selector value IPLD datamodel text-path selector, or IPLD json selector - --from 
value address to send transactions from - --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery - --maxPrice value maximum price the client is willing to consider (default: 0 FIL) - --pieceCid value require data to be retrieved from a specific Piece CID - --allow-local (default: false) - --help, -h show help -``` - -### lotus client cancel-retrieval -``` -NAME: - lotus client cancel-retrieval - Cancel a retrieval deal by deal ID; this also cancels the associated transfer - -USAGE: - lotus client cancel-retrieval [command options] [arguments...] - -CATEGORY: - RETRIEVAL - -OPTIONS: - --deal-id value specify retrieval deal by deal ID (default: 0) - --help, -h show help -``` - -### lotus client list-retrievals -``` -NAME: - lotus client list-retrievals - List retrieval market deals - -USAGE: - lotus client list-retrievals [command options] [arguments...] - -CATEGORY: - RETRIEVAL - -OPTIONS: - --verbose, -v print verbose deal details (default: false) - --show-failed show failed/failing deals (default: true) - --completed show completed retrievals (default: false) - --watch watch deal updates in real-time, rather than a one time list (default: false) - --help, -h show help -``` - -### lotus client deal -``` -NAME: - lotus client deal - Initialize storage deal with a miner - -USAGE: - lotus client deal [command options] [dataCid miner price duration] - -CATEGORY: - STORAGE - -DESCRIPTION: - Make a deal with a miner. - dataCid comes from running 'lotus client import'. - miner is the address of the miner you wish to make a deal with. - price is measured in FIL/Epoch. Miners usually don't accept a bid - lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miners listed price - with 'lotus client query-ask '. - duration is how long the miner should store the data for, in blocks. - The minimum value is 518400 (6 months). 
- -OPTIONS: - --manual-piece-cid value manually specify piece commitment for data (dataCid must be to a car file) - --manual-piece-size value if manually specifying piece cid, used to specify size (dataCid must be to a car file) (default: 0) - --manual-stateless-deal instructs the node to send an offline deal without registering it with the deallist/fsm (default: false) - --from value specify address to fund the deal with - --start-epoch value specify the epoch that the deal should start at (default: -1) - --fast-retrieval indicates that data should be available for fast retrieval (default: true) - --verified-deal indicate that the deal counts towards verified client total (default: true if client is verified, false otherwise) - --provider-collateral value specify the requested provider collateral the miner should put up - --help, -h show help -``` - -### lotus client query-ask -``` -NAME: - lotus client query-ask - Find a miners ask - -USAGE: - lotus client query-ask [command options] [minerAddress] - -CATEGORY: - STORAGE - -OPTIONS: - --peerid value specify peer ID of node to make query against - --size value data size in bytes (default: 0) - --duration value deal duration (default: 0) - --help, -h show help -``` - -### lotus client list-deals -``` -NAME: - lotus client list-deals - List storage market deals - -USAGE: - lotus client list-deals [command options] [arguments...] 
- -CATEGORY: - STORAGE - -OPTIONS: - --verbose, -v print verbose deal details (default: false) - --show-failed show failed/failing deals (default: false) - --watch watch deal updates in real-time, rather than a one time list (default: false) - --help, -h show help -``` - -### lotus client get-deal -``` -NAME: - lotus client get-deal - Print detailed deal information - -USAGE: - lotus client get-deal [command options] [proposalCID] - -CATEGORY: - STORAGE - -OPTIONS: - --help, -h show help -``` - -### lotus client list-asks -``` -NAME: - lotus client list-asks - List asks for top miners - -USAGE: - lotus client list-asks [command options] [arguments...] - -CATEGORY: - STORAGE - -OPTIONS: - --by-ping sort by ping (default: false) - --output-format value Either 'text' or 'csv' (default: "text") - --protocols Output supported deal protocols (default: false) - --help, -h show help -``` - -### lotus client deal-stats -``` -NAME: - lotus client deal-stats - Print statistics about local storage deals - -USAGE: - lotus client deal-stats [command options] [arguments...] - -CATEGORY: - STORAGE - -OPTIONS: - --newer-than value (default: 0s) - --help, -h show help -``` - -### lotus client inspect-deal -``` -NAME: - lotus client inspect-deal - Inspect detailed information about deal's lifecycle and the various stages it goes through - -USAGE: - lotus client inspect-deal [command options] [arguments...] 
- -CATEGORY: - STORAGE - -OPTIONS: - --deal-id value (default: 0) - --proposal-cid value - --help, -h show help -``` - -### lotus client commP -``` -NAME: - lotus client commP - Calculate the piece-cid (commP) of a CAR file - -USAGE: - lotus client commP [command options] [inputFile] - -CATEGORY: - UTIL - -OPTIONS: - --help, -h show help -``` - -### lotus client generate-car -``` -NAME: - lotus client generate-car - Generate a car file from input - -USAGE: - lotus client generate-car [command options] [inputPath outputPath] - -CATEGORY: - UTIL - -OPTIONS: - --help, -h show help -``` - -### lotus client balances -``` -NAME: - lotus client balances - Print storage market client balances - -USAGE: - lotus client balances [command options] [arguments...] - -CATEGORY: - UTIL - -OPTIONS: - --client value specify storage client address - --help, -h show help -``` - -### lotus client list-transfers -``` -NAME: - lotus client list-transfers - List ongoing data transfers for deals - -USAGE: - lotus client list-transfers [command options] [arguments...] 
- -CATEGORY: - UTIL - -OPTIONS: - --verbose, -v print verbose transfer details (default: false) - --completed show completed data transfers (default: false) - --watch watch deal updates in real-time, rather than a one time list (default: false) - --show-failed show failed/cancelled transfers (default: false) - --help, -h show help -``` - -### lotus client restart-transfer -``` -NAME: - lotus client restart-transfer - Force restart a stalled data transfer - -USAGE: - lotus client restart-transfer [command options] [transferID] - -CATEGORY: - UTIL - -OPTIONS: - --peerid value narrow to transfer with specific peer - --initiator specify only transfers where peer is/is not initiator (default: true) - --help, -h show help -``` - -### lotus client cancel-transfer -``` -NAME: - lotus client cancel-transfer - Force cancel a data transfer - -USAGE: - lotus client cancel-transfer [command options] [transferID] - -CATEGORY: - UTIL - -OPTIONS: - --peerid value narrow to transfer with specific peer - --initiator specify only transfers where peer is/is not initiator (default: true) - --cancel-timeout value time to wait for cancel to be sent to storage provider (default: 5s) - --help, -h show help -``` - ## lotus msig ``` NAME: @@ -1380,9 +870,8 @@ USAGE: lotus paych add-funds [command options] [fromAddress toAddress amount] OPTIONS: - --restart-retrievals restart stalled retrieval deals on this payment channel (default: true) - --reserve mark funds as reserved (default: false) - --help, -h show help + --reserve mark funds as reserved (default: false) + --help, -h show help ``` ### lotus paych list diff --git a/documentation/en/cli-sptool.md b/documentation/en/cli-sptool.md deleted file mode 100644 index 3a53fe7a338..00000000000 --- a/documentation/en/cli-sptool.md +++ /dev/null @@ -1,474 +0,0 @@ -# sptool -``` -NAME: - sptool - Manage Filecoin Miner Actor - -USAGE: - sptool [global options] command [command options] [arguments...] 
- -VERSION: - 1.27.0 - -COMMANDS: - actor Manage Filecoin Miner Actor Metadata - info Print miner actor info - sectors interact with sector store - proving View proving information - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS: - --log-level value (default: "info") - --actor value miner actor to manage [$SP_ADDRESS] - --help, -h show help - --version, -v print the version -``` - -## sptool actor -``` -NAME: - sptool actor - Manage Filecoin Miner Actor Metadata - -USAGE: - sptool actor command [command options] [arguments...] - -COMMANDS: - set-addresses, set-addrs set addresses that your miner can be publicly dialed on - withdraw withdraw available balance to beneficiary - repay-debt pay down a miner's debt - set-peer-id set the peer id of your miner - set-owner Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) - control Manage control addresses - propose-change-worker Propose a worker address change - confirm-change-worker Confirm a worker address change - compact-allocated compact allocated sectors bitfield - propose-change-beneficiary Propose a beneficiary address change - confirm-change-beneficiary Confirm a beneficiary address change - new-miner Initializes a new miner actor - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### sptool actor set-addresses -``` -NAME: - sptool actor set-addresses - set addresses that your miner can be publicly dialed on - -USAGE: - sptool actor set-addresses [command options] - -OPTIONS: - --from value optionally specify the account to send the message from - --gas-limit value set gas limit (default: 0) - --unset unset address (default: false) - --help, -h show help -``` - -### sptool actor withdraw -``` -NAME: - sptool actor withdraw - withdraw available balance to beneficiary - -USAGE: - sptool actor withdraw [command options] [amount (FIL)] - -OPTIONS: - 
--confidence value number of block confirmations to wait for (default: 5) - --beneficiary send withdraw message from the beneficiary address (default: false) - --help, -h show help -``` - -### sptool actor repay-debt -``` -NAME: - sptool actor repay-debt - pay down a miner's debt - -USAGE: - sptool actor repay-debt [command options] [amount (FIL)] - -OPTIONS: - --from value optionally specify the account to send funds from - --help, -h show help -``` - -### sptool actor set-peer-id -``` -NAME: - sptool actor set-peer-id - set the peer id of your miner - -USAGE: - sptool actor set-peer-id [command options] - -OPTIONS: - --gas-limit value set gas limit (default: 0) - --help, -h show help -``` - -### sptool actor set-owner -``` -NAME: - sptool actor set-owner - Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) - -USAGE: - sptool actor set-owner [command options] [newOwnerAddress senderAddress] - -OPTIONS: - --really-do-it Actually send transaction performing the action (default: false) - --help, -h show help -``` - -### sptool actor control -``` -NAME: - sptool actor control - Manage control addresses - -USAGE: - sptool actor control command [command options] [arguments...] - -COMMANDS: - list Get currently set control addresses. Note: This excludes most roles as they are not known to the immediate chain state. - set Set control address(-es) - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### sptool actor control list -``` -NAME: - sptool actor control list - Get currently set control addresses. Note: This excludes most roles as they are not known to the immediate chain state. - -USAGE: - sptool actor control list [command options] [arguments...] 
- -OPTIONS: - --verbose (default: false) - --help, -h show help -``` - -#### sptool actor control set -``` -NAME: - sptool actor control set - Set control address(-es) - -USAGE: - sptool actor control set [command options] [...address] - -OPTIONS: - --really-do-it Actually send transaction performing the action (default: false) - --help, -h show help -``` - -### sptool actor propose-change-worker -``` -NAME: - sptool actor propose-change-worker - Propose a worker address change - -USAGE: - sptool actor propose-change-worker [command options] [address] - -OPTIONS: - --really-do-it Actually send transaction performing the action (default: false) - --help, -h show help -``` - -### sptool actor confirm-change-worker -``` -NAME: - sptool actor confirm-change-worker - Confirm a worker address change - -USAGE: - sptool actor confirm-change-worker [command options] [address] - -OPTIONS: - --really-do-it Actually send transaction performing the action (default: false) - --help, -h show help -``` - -### sptool actor compact-allocated -``` -NAME: - sptool actor compact-allocated - compact allocated sectors bitfield - -USAGE: - sptool actor compact-allocated [command options] [arguments...] 
- -OPTIONS: - --mask-last-offset value Mask sector IDs from 0 to 'highest_allocated - offset' (default: 0) - --mask-upto-n value Mask sector IDs from 0 to 'n' (default: 0) - --really-do-it Actually send transaction performing the action (default: false) - --help, -h show help -``` - -### sptool actor propose-change-beneficiary -``` -NAME: - sptool actor propose-change-beneficiary - Propose a beneficiary address change - -USAGE: - sptool actor propose-change-beneficiary [command options] [beneficiaryAddress quota expiration] - -OPTIONS: - --really-do-it Actually send transaction performing the action (default: false) - --overwrite-pending-change Overwrite the current beneficiary change proposal (default: false) - --actor value specify the address of miner actor - --help, -h show help -``` - -### sptool actor confirm-change-beneficiary -``` -NAME: - sptool actor confirm-change-beneficiary - Confirm a beneficiary address change - -USAGE: - sptool actor confirm-change-beneficiary [command options] [minerID] - -OPTIONS: - --really-do-it Actually send transaction performing the action (default: false) - --existing-beneficiary send confirmation from the existing beneficiary address (default: false) - --new-beneficiary send confirmation from the new beneficiary address (default: false) - --help, -h show help -``` - -### sptool actor new-miner -``` -NAME: - sptool actor new-miner - Initializes a new miner actor - -USAGE: - sptool actor new-miner [command options] [arguments...] - -OPTIONS: - --worker value, -w value worker key to use for new miner initialisation - --owner value, -o value owner key to use for new miner initialisation - --from value, -f value address to send actor(miner) creation message from - --sector-size value specify sector size to use for new miner initialisation - --help, -h show help -``` - -## sptool info -``` -NAME: - sptool info - Print miner actor info - -USAGE: - sptool info [command options] [arguments...] 
- -OPTIONS: - --help, -h show help -``` - -## sptool sectors -``` -NAME: - sptool sectors - interact with sector store - -USAGE: - sptool sectors command [command options] [arguments...] - -COMMANDS: - status Get the seal status of a sector by its number - list List sectors - precommits Print on-chain precommit info - check-expire Inspect expiring sectors - expired Get or cleanup expired sectors - extend Extend expiring sectors while not exceeding each sector's max life - terminate Forcefully terminate a sector (WARNING: This means losing power and pay a one-time termination penalty(including collateral) for the terminated sector) - compact-partitions removes dead sectors from partitions and reduces the number of partitions used if possible - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### sptool sectors status -``` -NAME: - sptool sectors status - Get the seal status of a sector by its number - -USAGE: - sptool sectors status [command options] - -OPTIONS: - --log, -l display event log (default: false) - --on-chain-info, -c show sector on chain info (default: false) - --partition-info, -p show partition related info (default: false) - --proof print snark proof bytes as hex (default: false) - --help, -h show help -``` - -### sptool sectors list -``` -NAME: - sptool sectors list - List sectors - -USAGE: - sptool sectors list [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### sptool sectors precommits -``` -NAME: - sptool sectors precommits - Print on-chain precommit info - -USAGE: - sptool sectors precommits [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### sptool sectors check-expire -``` -NAME: - sptool sectors check-expire - Inspect expiring sectors - -USAGE: - sptool sectors check-expire [command options] [arguments...] 
- -OPTIONS: - --cutoff value skip sectors whose current expiration is more than epochs from now, defaults to 60 days (default: 172800) - --help, -h show help -``` - -### sptool sectors expired -``` -NAME: - sptool sectors expired - Get or cleanup expired sectors - -USAGE: - sptool sectors expired [command options] [arguments...] - -OPTIONS: - --expired-epoch value epoch at which to check sector expirations (default: WinningPoSt lookback epoch) - --help, -h show help -``` - -### sptool sectors extend -``` -NAME: - sptool sectors extend - Extend expiring sectors while not exceeding each sector's max life - -USAGE: - sptool sectors extend [command options] - -OPTIONS: - --from value only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 120 (1 hour) (default: 0) - --to value only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 92160 (32 days) (default: 0) - --sector-file value provide a file containing one sector number in each line, ignoring above selecting criteria - --exclude value optionally provide a file containing excluding sectors - --extension value try to extend selected sectors by this number of epochs, defaults to 540 days (default: 1555200) - --new-expiration value try to extend selected sectors to this epoch, ignoring extension (default: 0) - --only-cc only extend CC sectors (useful for making sector ready for snap upgrade) (default: false) - --drop-claims drop claims for sectors that can be extended, but only by dropping some of their verified power claims (default: false) - --tolerance value don't try to extend sectors by fewer than this number of epochs, defaults to 7 days (default: 20160) - --max-fee value use up to this amount of FIL for one message. pass this flag to avoid message congestion. 
(default: "0") - --max-sectors value the maximum number of sectors contained in each message (default: 0) - --really-do-it pass this flag to really extend sectors, otherwise will only print out json representation of parameters (default: false) - --help, -h show help -``` - -### sptool sectors terminate -``` -NAME: - sptool sectors terminate - Forcefully terminate a sector (WARNING: This means losing power and pay a one-time termination penalty(including collateral) for the terminated sector) - -USAGE: - sptool sectors terminate [command options] [sectorNum1 sectorNum2 ...] - -OPTIONS: - --actor value specify the address of miner actor - --really-do-it pass this flag if you know what you are doing (default: false) - --from value specify the address to send the terminate message from - --help, -h show help -``` - -### sptool sectors compact-partitions -``` -NAME: - sptool sectors compact-partitions - removes dead sectors from partitions and reduces the number of partitions used if possible - -USAGE: - sptool sectors compact-partitions [command options] [arguments...] - -OPTIONS: - --deadline value the deadline to compact the partitions in (default: 0) - --partitions value [ --partitions value ] list of partitions to compact sectors in - --really-do-it Actually send transaction performing the action (default: false) - --help, -h show help -``` - -## sptool proving -``` -NAME: - sptool proving - View proving information - -USAGE: - sptool proving command [command options] [arguments...] 
- -COMMANDS: - info View current state information - deadlines View the current proving period deadlines information - deadline View the current proving period deadline information by its index - faults View the currently known proving faulty sectors information - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### sptool proving info -``` -NAME: - sptool proving info - View current state information - -USAGE: - sptool proving info [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### sptool proving deadlines -``` -NAME: - sptool proving deadlines - View the current proving period deadlines information - -USAGE: - sptool proving deadlines [command options] [arguments...] - -OPTIONS: - --all, -a Count all sectors (only live sectors are counted by default) (default: false) - --help, -h show help -``` - -### sptool proving deadline -``` -NAME: - sptool proving deadline - View the current proving period deadline information by its index - -USAGE: - sptool proving deadline [command options] - -OPTIONS: - --sector-nums, -n Print sector/fault numbers belonging to this deadline (default: false) - --bitfield, -b Print partition bitfield stats (default: false) - --help, -h show help -``` - -### sptool proving faults -``` -NAME: - sptool proving faults - View the currently known proving faulty sectors information - -USAGE: - sptool proving faults [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` diff --git a/documentation/en/default-curio-config.toml b/documentation/en/default-curio-config.toml deleted file mode 100644 index afcb7608aa6..00000000000 --- a/documentation/en/default-curio-config.toml +++ /dev/null @@ -1,366 +0,0 @@ -[Subsystems] - # EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster - # with WindowPoSt enabled will also participate in the window post scheduler. 
It is possible to have multiple - # machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline, - # will allow for parallel processing of partitions. - # - # It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without - # the need for additional machines. In setups like this it is generally recommended to run - # partitionsPerDeadline+1 machines. - # - # type: bool - #EnableWindowPost = false - - # type: int - #WindowPostMaxTasks = 0 - - # EnableWinningPost enables winning post to be executed on this curio instance. - # Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler. - # It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost - # documentation. - # - # type: bool - #EnableWinningPost = false - - # type: int - #WinningPostMaxTasks = 0 - - # EnableParkPiece enables the "piece parking" task to run on this node. This task is responsible for fetching - # pieces from the network and storing them in the storage subsystem until sectors are sealed. This task is - # only applicable when integrating with boost, and should be enabled on nodes which will hold deal data - # from boost until sectors containing the related pieces have the TreeD/TreeR constructed. - # Note that future Curio implementations will have a separate task type for fetching pieces from the internet. - # - # type: bool - #EnableParkPiece = false - - # type: int - #ParkPieceMaxTasks = 0 - - # EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation - # creating 11 layer files in sector cache directory. - # - # SDR is the first task in the sealing pipeline. It's inputs are just the hash of the - # unsealed data (CommD), sector number, miner id, and the seal proof type. - # It's outputs are the 11 layer files in the sector cache directory. 
- # - # In lotus-miner this was run as part of PreCommit1. - # - # type: bool - #EnableSealSDR = false - - # The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #SealSDRMaxTasks = 0 - - # EnableSealSDRTrees enables the SDR pipeline tree-building task to run. - # This task handles encoding of unsealed data into last sdr layer and building - # of TreeR, TreeC and TreeD. - # - # This task runs after SDR - # TreeD is first computed with optional input of unsealed data - # TreeR is computed from replica, which is first computed as field - # addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data) - # TreeC is computed from the 11 SDR layers - # The 3 trees will later be used to compute the PoRep proof. - # - # In case of SyntheticPoRep challenges for PoRep will be pre-generated at this step, and trees and layers - # will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk) - # then using a small subset of them for the actual PoRep computation. This allows for significant scratch space - # saving between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step) - # - # In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1). - # Note that nodes with SDRTrees enabled will also answer to Finalize tasks, - # which just remove unneeded tree data after PoRep is computed. - # - # type: bool - #EnableSealSDRTrees = false - - # The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #SealSDRTreesMaxTasks = 0 - - # FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously. 
- # The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever - # machine holds sector cache files, as it removes unneeded tree data after PoRep is computed. - # Finalize will run in parallel with the SubmitCommitMsg task. - # - # type: int - #FinalizeMaxTasks = 0 - - # EnableSendPrecommitMsg enables the sending of precommit messages to the chain - # from this curio instance. - # This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message - # - # type: bool - #EnableSendPrecommitMsg = false - - # EnablePoRepProof enables the computation of the porep proof - # - # This task runs after interactive-porep seed becomes available, which happens 150 epochs (75min) after the - # precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are - # requested from the machine which holds sector cache files which most likely is the machine which ran the SDRTrees - # task. - # - # In lotus-miner this was Commit1 / Commit2 - # - # type: bool - #EnablePoRepProof = false - - # The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #PoRepProofMaxTasks = 0 - - # EnableSendCommitMsg enables the sending of commit messages to the chain - # from this curio instance. - # - # type: bool - #EnableSendCommitMsg = false - - # EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance. - # This tasks should only be enabled on nodes with long-term storage. - # - # The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the - # SDRTrees machine into long-term storage. This task runs after the Finalize task. - # - # type: bool - #EnableMoveStorage = false - - # The maximum amount of MoveStorage tasks that can run simultaneously. 
Note that the maximum number of tasks will - # also be bounded by resources available on the machine. It is recommended that this value is set to a number which - # uses all available network (or disk) bandwidth on the machine without causing bottlenecks. - # - # type: int - #MoveStorageMaxTasks = 0 - - # BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. - # This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. - # Strings should be in the format "actor:port" or "actor:ip:port". Default listen address is 0.0.0.0 - # Example: "f0123:32100", "f0123:127.0.0.1:32100". Multiple addresses can be specified. - # - # When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the - # deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one - # node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data. - # This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was - # received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for - # the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are - # sealed. - # - # To get API info for boost configuration run 'curio market rpc-info' - # - # NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on - # a machine which handles ParkPiece tasks. - # - # type: []string - #BoostAdapters = [] - - # EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should - # only need to be run on a single machine in the cluster. 
- # - # type: bool - #EnableWebGui = false - - # The address that should listen for Web GUI requests. - # - # type: string - #GuiAddress = ":4701" - - -[Fees] - # type: types.FIL - #DefaultMaxFee = "0.07 FIL" - - # type: types.FIL - #MaxPreCommitGasFee = "0.025 FIL" - - # type: types.FIL - #MaxCommitGasFee = "0.05 FIL" - - # type: types.FIL - #MaxTerminateGasFee = "0.5 FIL" - - # WindowPoSt is a high-value operation, so the default fee should be high. - # - # type: types.FIL - #MaxWindowPoStGasFee = "5 FIL" - - # type: types.FIL - #MaxPublishDealsFee = "0.05 FIL" - - [Fees.MaxPreCommitBatchGasFee] - # type: types.FIL - #Base = "0 FIL" - - # type: types.FIL - #PerSector = "0.02 FIL" - - [Fees.MaxCommitBatchGasFee] - # type: types.FIL - #Base = "0 FIL" - - # type: types.FIL - #PerSector = "0.03 FIL" - - -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - #MinerAddresses = [] - - -[Proving] - # Maximum number of sector checks to run in parallel. (0 = unlimited) - # - # WARNING: Setting this value too high may make the node crash by running out of stack - # WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due - # to late submission. - # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: int - #ParallelCheckLimit = 32 - - # Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped - # - # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the - # test challenge took longer than this timeout - # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are - # blocked (e.g. 
in case of disconnected NFS mount) - # - # type: Duration - #SingleCheckTimeout = "10m0s" - - # Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in - # the partition which didn't get checked on time will be skipped - # - # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the - # test challenge took longer than this timeout - # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are - # blocked or slow - # - # type: Duration - #PartitionCheckTimeout = "20m0s" - - # Disable WindowPoSt provable sector readability checks. - # - # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges - # from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as - # we're only interested in checking that sector data can be read. - # - # When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process - # can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by - # the builtin logic not skipping snark computation when some sectors need to be skipped. - # - # When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and - # if challenges for some sectors aren't readable, those sectors will just get skipped. - # - # Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter - # time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should - # be negligible. - # - # NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers. 
- # - # NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is - # sent to the chain - # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: bool - #DisableWDPoStPreChecks = false - - # Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (3 in nv21) - # - # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors. - # // - # Note that setting this value lower may result in less efficient gas use - more messages will be sent, - # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) - # - # Setting this value above the network limit has no effect - # - # type: int - #MaxPartitionsPerPoStMessage = 0 - - # In some cases when submitting DeclareFaultsRecovered messages, - # there may be too many recoveries to fit in a BlockGasLimit. - # In those cases it may be necessary to set this value to something low (eg 1); - # Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed, - # resulting in more total gas use (but each message will have lower gas limit) - # - # type: int - #MaxPartitionsPerRecoveryMessage = 0 - - # Enable single partition per PoSt Message for partitions containing recovery sectors - # - # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be - # too high to fit in the block gas limit. 
In those cases, it becomes useful to only house the single partition - # with recovering sectors in the post message - # - # Note that setting this value lower may result in less efficient gas use - more messages will be sent, - # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) - # - # type: bool - #SingleRecoveringPartitionPerPostMessage = false - - -[Ingest] - # Maximum number of sectors that can be queued waiting for SDR to start processing. - # 0 = unlimited - # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. - # The SDR queue includes deals which are in the process of entering the sealing pipeline - size of this queue - # will also impact the maximum number of ParkPiece tasks which can run concurrently. - # - # SDR queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism. - # - # type: int - #MaxQueueSDR = 8 - - # Maximum number of sectors that can be queued waiting for SDRTrees to start processing. - # 0 = unlimited - # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. - # In case of the trees tasks it is possible that this queue grows more than this limit, the backpressure is only - # applied to sectors entering the pipeline. - # - # type: int - #MaxQueueTrees = 0 - - # Maximum number of sectors that can be queued waiting for PoRep to start processing. - # 0 = unlimited - # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. - # Like with the trees tasks, it is possible that this queue grows more than this limit, the backpressure is only - # applied to sectors entering the pipeline. 
- # - # type: int - #MaxQueuePoRep = 0 - - -[Journal] - # Events of the form: "system1:event1,system1:event2[,...]" - # - # type: string - #DisabledEvents = "" - - -[Apis] - # RPC Secret for the storage subsystem. - # If integrating with lotus-miner this must match the value from - # cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey - # - # type: string - #StorageRPCSecret = "" - diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 8d3c6a427e8..2971a4e9199 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -128,30 +128,6 @@ #TracerSourceAuth = "" -[Client] - # The maximum number of simultaneous data transfers between the client - # and storage providers for storage deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE - #SimultaneousTransfersForStorage = 20 - - # The maximum number of simultaneous data transfers between the client - # and storage providers for retrieval deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL - #SimultaneousTransfersForRetrieval = 20 - - # Require that retrievals perform no on-chain operations. Paid retrievals - # without existing payment channels with available funds will fail instead - # of automatically performing on-chain operations. 
- # - # type: bool - # env var: LOTUS_CLIENT_OFFCHAINRETRIEVAL - #OffChainRetrieval = false - - [Wallet] # type: string # env var: LOTUS_WALLET_REMOTEBACKEND diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index 17fd24fa370..bc2c374362d 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -141,10 +141,6 @@ # env var: LOTUS_SUBSYSTEMS_ENABLESECTORSTORAGE #EnableSectorStorage = true - # type: bool - # env var: LOTUS_SUBSYSTEMS_ENABLEMARKETS - #EnableMarkets = false - # When enabled, the sector index will reside in an external database # as opposed to the local KV store in the miner process # This is useful to allow workers to bypass the lotus miner to access sector information @@ -188,190 +184,12 @@ [Dealmaking] - # When enabled, the miner can accept online deals - # - # type: bool - # env var: LOTUS_DEALMAKING_CONSIDERONLINESTORAGEDEALS - #ConsiderOnlineStorageDeals = true - - # When enabled, the miner can accept offline deals - # - # type: bool - # env var: LOTUS_DEALMAKING_CONSIDEROFFLINESTORAGEDEALS - #ConsiderOfflineStorageDeals = true - - # When enabled, the miner can accept retrieval deals - # - # type: bool - # env var: LOTUS_DEALMAKING_CONSIDERONLINERETRIEVALDEALS - #ConsiderOnlineRetrievalDeals = true - - # When enabled, the miner can accept offline retrieval deals - # - # type: bool - # env var: LOTUS_DEALMAKING_CONSIDEROFFLINERETRIEVALDEALS - #ConsiderOfflineRetrievalDeals = true - - # When enabled, the miner can accept verified deals - # - # type: bool - # env var: LOTUS_DEALMAKING_CONSIDERVERIFIEDSTORAGEDEALS - #ConsiderVerifiedStorageDeals = true - - # When enabled, the miner can accept unverified deals - # - # type: bool - # env var: LOTUS_DEALMAKING_CONSIDERUNVERIFIEDSTORAGEDEALS - #ConsiderUnverifiedStorageDeals = true - - # A list of Data CIDs to reject when making deals - # - # type: []cid.Cid - # env var: 
LOTUS_DEALMAKING_PIECECIDBLOCKLIST - #PieceCidBlocklist = [] - - # Maximum expected amount of time getting the deal into a sealed sector will take - # This includes the time the deal will need to get transferred and published - # before being assigned to a sector - # - # type: Duration - # env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION - #ExpectedSealDuration = "24h0m0s" - - # Maximum amount of time proposed deal StartEpoch can be in future - # - # type: Duration - # env var: LOTUS_DEALMAKING_MAXDEALSTARTDELAY - #MaxDealStartDelay = "336h0m0s" - - # When a deal is ready to publish, the amount of time to wait for more - # deals to be ready to publish before publishing them all as a batch - # - # type: Duration - # env var: LOTUS_DEALMAKING_PUBLISHMSGPERIOD - #PublishMsgPeriod = "1h0m0s" - - # The maximum number of deals to include in a single PublishStorageDeals - # message - # - # type: uint64 - # env var: LOTUS_DEALMAKING_MAXDEALSPERPUBLISHMSG - #MaxDealsPerPublishMsg = 8 - - # The maximum collateral that the provider will put up against a deal, - # as a multiplier of the minimum collateral bound - # - # type: uint64 - # env var: LOTUS_DEALMAKING_MAXPROVIDERCOLLATERALMULTIPLIER - #MaxProviderCollateralMultiplier = 2 - - # The maximum allowed disk usage size in bytes of staging deals not yet - # passed to the sealing node by the markets service. 0 is unlimited. - # - # type: int64 - # env var: LOTUS_DEALMAKING_MAXSTAGINGDEALSBYTES - #MaxStagingDealsBytes = 0 - - # The maximum number of parallel online data transfers for storage deals - # - # type: uint64 - # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE - #SimultaneousTransfersForStorage = 20 - - # The maximum number of simultaneous data transfers from any single client - # for storage deals. - # Unset by default (0), and values higher than SimultaneousTransfersForStorage - # will have no effect; i.e. 
the total number of simultaneous data transfers - # across all storage clients is bound by SimultaneousTransfersForStorage - # regardless of this number. - # - # type: uint64 - # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT - #SimultaneousTransfersForStoragePerClient = 0 - - # The maximum number of parallel online data transfers for retrieval deals - # - # type: uint64 - # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORRETRIEVAL - #SimultaneousTransfersForRetrieval = 20 - # Minimum start epoch buffer to give time for sealing of sector with deal. # # type: uint64 # env var: LOTUS_DEALMAKING_STARTEPOCHSEALINGBUFFER #StartEpochSealingBuffer = 480 - # A command used for fine-grained evaluation of storage deals - # see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details - # - # type: string - # env var: LOTUS_DEALMAKING_FILTER - #Filter = "" - - # A command used for fine-grained evaluation of retrieval deals - # see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details - # - # type: string - # env var: LOTUS_DEALMAKING_RETRIEVALFILTER - #RetrievalFilter = "" - - [Dealmaking.RetrievalPricing] - # env var: LOTUS_DEALMAKING_RETRIEVALPRICING_STRATEGY - #Strategy = "default" - - [Dealmaking.RetrievalPricing.Default] - # env var: LOTUS_DEALMAKING_RETRIEVALPRICING_DEFAULT_VERIFIEDDEALSFREETRANSFER - #VerifiedDealsFreeTransfer = true - - [Dealmaking.RetrievalPricing.External] - # env var: LOTUS_DEALMAKING_RETRIEVALPRICING_EXTERNAL_PATH - #Path = "" - - -[IndexProvider] - # Enable set whether to enable indexing announcement to the network and expose endpoints that - # allow indexer nodes to process announcements. Enabled by default. 
- # - # type: bool - # env var: LOTUS_INDEXPROVIDER_ENABLE - #Enable = true - - # EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement - # entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The - # maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and - # the length of multihashes being advertised. For example, advertising 128-bit long multihashes - # with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to - # 256MiB when full. - # - # type: int - # env var: LOTUS_INDEXPROVIDER_ENTRIESCACHECAPACITY - #EntriesCacheCapacity = 1024 - - # EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk. - # Defaults to 16384 if not specified. Note that chunks are chained together for indexing - # advertisements that include more multihashes than the configured EntriesChunkSize. - # - # type: int - # env var: LOTUS_INDEXPROVIDER_ENTRIESCHUNKSIZE - #EntriesChunkSize = 16384 - - # TopicName sets the topic name on which the changes to the advertised content are announced. - # If not explicitly specified, the topic name is automatically inferred from the network name - # in following format: '/indexer/ingest/' - # Defaults to empty, which implies the topic name is inferred from network name. - # - # type: string - # env var: LOTUS_INDEXPROVIDER_TOPICNAME - #TopicName = "" - - # PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine - # starts. By default, the cache is rehydrated from previously cached entries stored in - # datastore if any is present. - # - # type: bool - # env var: LOTUS_INDEXPROVIDER_PURGECACHEONSTART - #PurgeCacheOnStart = false - [Proving] # Maximum number of sector checks to run in parallel. (0 = unlimited) @@ -896,63 +714,6 @@ #DisableWorkerFallback = false -[DAGStore] - # Path to the dagstore root directory. 
This directory contains three - # subdirectories, which can be symlinked to alternative locations if - # need be: - # - ./transients: caches unsealed deals that have been fetched from the - # storage subsystem for serving retrievals. - # - ./indices: stores shard indices. - # - ./datastore: holds the KV store tracking the state of every shard - # known to the DAG store. - # Default value: /dagstore (split deployment) or - # /dagstore (monolith deployment) - # - # type: string - # env var: LOTUS_DAGSTORE_ROOTDIR - #RootDir = "" - - # The maximum amount of indexing jobs that can run simultaneously. - # 0 means unlimited. - # Default value: 5. - # - # type: int - # env var: LOTUS_DAGSTORE_MAXCONCURRENTINDEX - #MaxConcurrentIndex = 5 - - # The maximum amount of unsealed deals that can be fetched simultaneously - # from the storage subsystem. 0 means unlimited. - # Default value: 0 (unlimited). - # - # type: int - # env var: LOTUS_DAGSTORE_MAXCONCURRENTREADYFETCHES - #MaxConcurrentReadyFetches = 0 - - # The maximum amount of unseals that can be processed simultaneously - # from the storage subsystem. 0 means unlimited. - # Default value: 0 (unlimited). - # - # type: int - # env var: LOTUS_DAGSTORE_MAXCONCURRENTUNSEALS - #MaxConcurrentUnseals = 5 - - # The maximum number of simultaneous inflight API calls to the storage - # subsystem. - # Default value: 100. - # - # type: int - # env var: LOTUS_DAGSTORE_MAXCONCURRENCYSTORAGECALLS - #MaxConcurrencyStorageCalls = 100 - - # The time between calls to periodic dagstore GC, in time.Duration string - # representation, e.g. 1m, 5m, 1h. - # Default value: 1 minute. - # - # type: Duration - # env var: LOTUS_DAGSTORE_GCINTERVAL - #GCInterval = "1m0s" - - [HarmonyDB] # HOSTS is a list of hostnames to nodes running YugabyteDB # in a cluster. 
Only 1 is required diff --git a/documentation/misc/Building_a_network_skeleton.md b/documentation/misc/Building_a_network_skeleton.md index 5aea6f706f8..2e7dbf2ca6a 100644 --- a/documentation/misc/Building_a_network_skeleton.md +++ b/documentation/misc/Building_a_network_skeleton.md @@ -17,8 +17,7 @@ Each repository has its own set of steps that need to be followed. This guide wi 3. Clone the [go-state-types](https://github.com/filecoin-project/go-state-types) repository. -4. In your Lotus repository, add `replace github.com/filecoin-project/go-state-types => ../go-state-types` to the very end of your Lotus `go.mod` file. - - This ensures that your local clone copy of `go-state-types` is used. Any changes you make there will be reflected in your Lotus project. +4. Clone the [lotus](https://github.com/filecoin-project/lotus) repository. ## Ref-FVM Checklist @@ -29,17 +28,33 @@ Each repository has its own set of steps that need to be followed. This guide wi - In fvm/src/machine/default.rs, locate the new function within your machine context. You'll find a SUPPORTED_VERSIONS constant that sets the range of supported network versions. Update this range to include the new network version. Do this by replacing the existing feature flag nvXX-dev and NetworkVersion::VXX with the new ones corresponding to your new network version. - In `shared/src/version/mod.rs`, in the `NetworkVersion` implementation, you will find a series of constants representing different network versions. To add a new network version, you need to declare a new constant: `pub const (VXX+1): Self = Self(XX+1);` -You can take a look at [this Ref-FVM PR as a reference](https://github.com/filecoin-project/ref-fvm/pull/1929), which added the skeleton for network version 22. +You can take a look at [this Ref-FVM PR as a reference](https://github.com/filecoin-project/ref-fvm/pull/2000), which added the skeleton for network version 23. 
You can also check out the [releasing primary FVM crates checklist here](https://github.com/filecoin-project/ref-fvm/blob/master/CONTRIBUTING.md#primary-fvm-crates) + +2. In a separate PR bump the Ref-FVM version: + + - Bump the version in the root Cargo.toml file. + - Bump the fvm, fvm_shared and fvm_sdk versions in the `workspace` section in `ref-fvm/cargo.toml` + 1. `fvm→version` + 2. `fvm_shared→version` + 3. `fvm_sdk→version` + 4. `fvm_integration_tests→version` + - Update the cargo.lock file by running `cargo check --all` + - Make sure the `CHANGELOG.md` files in each of `fvm`, `sdk`, and `shared` are all up-to-date (look + through `git log -- path/to/crate`), set the release date & version, and add a new "Unreleased" + section. It may be appropriate to duplicate some entries across these crates if the changes are + relevant to multiple crates. + +You can take a look at [this PR as a reference](https://github.com/filecoin-project/ref-fvm/pull/2002). Wait for the PR to be merged, then the reviewer will publish a new release. ## Filecoin-FFI Checklist 1. Update the `TryFrom` implementation for `EngineVersion` in `rust/src/fvm/engine.rs` - Add the new network version number (XX+1) to the existing match arm for the network version. -2. Patch the FVM-dependency (fvm3) in `rust/cargo.toml` to use the custom branch of the FVM created in the [Ref-FVM Checklist](#ref-fvm-checklist)) - - Add `features = ["your-ref-fvm-branch"]` to tell Cargo to use you Ref-FVM branch. +2. Patch the FVM-dependency (fvm4 and fvm4_shared) in `rust/cargo.toml` to use the newly published Ref-FVM release. + - Add `features = ["nvXX+1-dev"]`. -You can take a look at this [Filecoin-FFI PR as a reference](https://github.com/filecoin-project/filecoin-ffi/pull/438), which added the skeleton for network version 22. +You can take a look at this [Filecoin-FFI PR as a reference](https://github.com/filecoin-project/filecoin-ffi/pull/454), which added the skeleton for network version 23. 
## Go-State-Types Checklist @@ -53,11 +68,25 @@ You can take a look at this [Filecoin-FFI PR as a reference](https://github.com/ - In `func VersionForNetwork` add `case network.Version(XX+1): return Version(XX+1), nil`. - Add the new version to the gen step of the makefile. - Add `$(GO_BIN) run ./builtin/v(XX+1)/gen/gen.go`. + - Commit the above changes with a `create base nvXX+1 skeleton` message so it's easier to review. + - In /builtin/vXX+1/migration, delete all the migration files that are specific to the previous network upgrade: + - Commit the above changes with a `Delete migration specific for nvXX` message so it's easier to review. + - Check your `/builtin/vXX+1/check.go` file, and see if there are any Invariant TODOs that stem from the previous migration that need to be cleaned up. + +You can take a look at this [Go-State-Types PR as a reference](https://github.com/filecoin-project/go-state-types/pull/257), which added the skeleton for network version 23. + +2. In a second PR based off your first PR, add a simple migration for the network upgrade: + + - Copy the system.go template [^1], and add it to your `/builtin/vXX+1/migration` folder. + - Copy the top.go template [^2], and add it to your `/builtin/vXX+1/migration` folder. + +You can take a look at this [Go-State-Types PR as a reference](https://github.com/filecoin-project/go-state-types/pull/258), which added a simple migration for network version 23. -You can take a look at this [Go-State-Types PR as a reference](https://github.com/filecoin-project/go-state-types/pull/232), which added the skeleton for network version 22. - ## Lotus Checklist +1. In your Lotus repository, add `replace github.com/filecoin-project/go-state-types => ../go-state-types` to the very end of your Lotus `go.mod` file. + - This ensures that your local clone copy of `go-state-types` is used. Any changes you make there will be reflected in your Lotus project. + 1. 
Import new actors: - Create a mock actor-bundle for the new network version. @@ -99,8 +128,8 @@ You can take a look at this [Go-State-Types PR as a reference](https://github.co 4. Update `chain/consensus/filcns/upgrades.go`. - Import `nv(XX+1) "github.com/filecoin-project/go-state-types/builtin/v(XX+1)/migration`. - - Add Schedule. [^1] - - Add Migration. [^2] + - Add Schedule. [^3] + - Add Migration. [^4] 5. Add actorstype to the NewActorRegistry in `/chain/consensus/computestate.go`. - Add `inv.Register(actorstypes.Version(XX+1), vm.ActorsVersionPredicate(actorstypes.Version(XX+1)), builtin.MakeRegistry(actorstypes.Version(XX+1))`. @@ -118,18 +147,171 @@ You can take a look at this [Go-State-Types PR as a reference](https://github.co 10. Run `make docsgen-cli`. -And you're done! This should create a network upgrade skeleton that you are able to run locally with your local go-state-types clones, and a mock Actors-bundle. This will allow you to: +And you're done! These are all the steps necessary to create a network upgrade skeleton that you will be able to run in a local devnet, and creates a basis where you can start testing new FIPs. When running a local developer network from this Lotus branch, bringing in all its dependencies, you should be able to: - Have a local developer network that starts at the current network version. -- Be able to see the Actor CIDs/Actor version for the mock v12-bundle through `lotus state actor-cids --network-version XX+1` +- Be able to see the Actor CIDs/Actor version for the mock Actor-bundle through `lotus state actor-cids --network-version XX+1` - Have a successful pre-migration. -- Complete Migration at upgrade epoch, but fail immidiately after the upgrade. +- Complete the migration at upgrade epoch, with a successful upgrade. 
+- Sync the new network version with the mock actor bundle, and be able to see that you are on a new network version with `lotus state network-version` + +You can take a look at this [Lotus PR as a reference](https://github.com/filecoin-project/lotus/pull/11897), which added the skeleton for network version 23. + +[^1]: Here is system.go template for a simple migration: + + ```go + package migration + + import ( + "context" + + system14 "github.com/filecoin-project/go-state-types/builtin/v14/system" + + "github.com/filecoin-project/go-state-types/migration" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + ) + + // System Actor migrator + type systemActorMigrator struct { + OutCodeCID cid.Cid + ManifestData cid.Cid + } + + func (m systemActorMigrator) MigratedCodeCID() cid.Cid { + return m.OutCodeCID + } + + func (m systemActorMigrator) MigrateState(ctx context.Context, store cbor.IpldStore, in migration.ActorMigrationInput) (*migration.ActorMigrationResult, error) { + // The ManifestData itself is already in the blockstore + state := system14.State{BuiltinActors: m.ManifestData} + stateHead, err := store.Put(ctx, &state) + if err != nil { + return nil, err + } + + return &migration.ActorMigrationResult{ + NewCodeCID: m.OutCodeCID, + NewHead: stateHead, + }, nil + } + + func (m systemActorMigrator) Deferred() bool { + return false + } + ``` + +[^2]: Here is top.go template for a simple migration: + + ```go + package migration + + import ( + "context" + + adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt" + + system13 "github.com/filecoin-project/go-state-types/builtin/v13/system" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/manifest" + "github.com/filecoin-project/go-state-types/migration" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + ) -You can take a look at this [Lotus 
PR as a reference](https://github.com/filecoin-project/lotus/pull/11432), which added the skeleton for network version 22. + // MigrateStateTree Migrates the filecoin state tree starting from the global state tree and upgrading all actor state. + // The store must support concurrent writes (even if the configured worker count is 1). + func MigrateStateTree(ctx context.Context, store cbor.IpldStore, newManifestCID cid.Cid, actorsRootIn cid.Cid, priorEpoch abi.ChainEpoch, cfg migration.Config, log migration.Logger, cache migration.MigrationCache) (cid.Cid, error) { + if cfg.MaxWorkers <= 0 { + return cid.Undef, xerrors.Errorf("invalid migration config with %d workers", cfg.MaxWorkers) + } + + adtStore := adt14.WrapStore(ctx, store) + + // Load input and output state trees + actorsIn, err := builtin.LoadTree(adtStore, actorsRootIn) + if err != nil { + return cid.Undef, xerrors.Errorf("loading state tree: %w", err) + } + + // load old manifest data + systemActor, ok, err := actorsIn.GetActorV5(builtin.SystemActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err) + } + + if !ok { + return cid.Undef, xerrors.New("didn't find system actor") + } + + var systemState system13.State + if err := store.Get(ctx, systemActor.Head, &systemState); err != nil { + return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err) + } + + var oldManifestData manifest.ManifestData + if err := store.Get(ctx, systemState.BuiltinActors, &oldManifestData); err != nil { + return cid.Undef, xerrors.Errorf("failed to get old manifest data: %w", err) + } + + // load new manifest + var newManifest manifest.Manifest + if err := adtStore.Get(ctx, newManifestCID, &newManifest); err != nil { + return cid.Undef, xerrors.Errorf("error reading actor manifest: %w", err) + } + + if err := newManifest.Load(ctx, adtStore); err != nil { + return cid.Undef, xerrors.Errorf("error loading actor manifest: %w", err) + } -// TODO: Create a video-tutorial 
going through all the steps + // Maps prior version code CIDs to migration functions. + migrations := make(map[cid.Cid]migration.ActorMigration) + // Set of prior version code CIDs for actors to defer during iteration, for explicit migration afterwards. + deferredCodeIDs := make(map[cid.Cid]struct{}) + + for _, oldEntry := range oldManifestData.Entries { + newCodeCID, ok := newManifest.Get(oldEntry.Name) + if !ok { + return cid.Undef, xerrors.Errorf("code cid for %s actor not found in new manifest", oldEntry.Name) + } + migrations[oldEntry.Code] = migration.CachedMigration(cache, migration.CodeMigrator{OutCodeCID: newCodeCID}) + } + + // migrations that migrate both code and state, override entries in `migrations` + + // The System Actor + + newSystemCodeCID, ok := newManifest.Get(manifest.SystemKey) + if !ok { + return cid.Undef, xerrors.Errorf("code cid for system actor not found in new manifest") + } + + migrations[systemActor.Code] = systemActorMigrator{OutCodeCID: newSystemCodeCID, ManifestData: newManifest.Data} -[^1]: Here is an example of how you can add a schedule: + if len(migrations)+len(deferredCodeIDs) != len(oldManifestData.Entries) { + return cid.Undef, xerrors.Errorf("incomplete migration specification with %d code CIDs, need %d", len(migrations)+len(deferredCodeIDs), len(oldManifestData.Entries)) + } + + actorsOut, err := migration.RunMigration(ctx, cfg, cache, store, log, actorsIn, migrations) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to run migration: %w", err) + } + + outCid, err := actorsOut.Flush() + if err != nil { + return cid.Undef, xerrors.Errorf("failed to flush actorsOut: %w", err) + } + + return outCid, nil + } + ``` + +[^3]: Here is an example of how you can add a schedule: ```go { @@ -148,7 +330,7 @@ You can take a look at this [Lotus PR as a reference](https://github.com/filecoi This schedule should be added to the `DefaultUpgradeSchedule` function, specifically within the `updates` array. 
-[^2]: Here is an example of how you can add a migration: +[^4]: Here is an example of how you can add a migration: ```go func PreUpgradeActorsV(XX+1)(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { @@ -189,7 +371,7 @@ You can take a look at this [Lotus PR as a reference](https://github.com/filecoi } newRoot, err := upgradeActorsV(XX+1)Common(ctx, sm, cache, root, epoch, ts, config) if err != nil { - return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err) + return cid.Undef, xerrors.Errorf("migrating actors vXX state: %w", err) } return newRoot, nil } @@ -214,21 +396,21 @@ You can take a look at this [Lotus PR as a reference](https://github.com/filecoi if stateRoot.Version != types.StateTreeVersion5 { return cid.Undef, xerrors.Errorf( - "expected state root version 5 for actors v(XX+1) upgrade, got %d", + "expected state root version 5 for actors vXX+1 upgrade, got %d", stateRoot.Version, ) } manifest, ok := actors.GetManifest(actorstypes.Version(XX+1)) if !ok { - return cid.Undef, xerrors.Errorf("no manifest CID for v(XX+1) upgrade") + return cid.Undef, xerrors.Errorf("no manifest CID for vXX+1 upgrade") } // Perform the migration newHamtRoot, err := nv(XX+1).MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config, migrationLogger{}, cache) if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v11: %w", err) + return cid.Undef, xerrors.Errorf("upgrading to actors vXX+1: %w", err) } // Persist the result. 
@@ -252,4 +434,4 @@ You can take a look at this [Lotus PR as a reference](https://github.com/filecoi return newRoot, nil } - ``` \ No newline at end of file + ``` diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index ed08caaf877..081367cae7c 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit ed08caaf8778e1b6def83efd37fce41574214353 +Subproject commit 081367cae7cdfe87d8b7240a9c3767ce86a40b05 diff --git a/gateway/node.go b/gateway/node.go index e9c695c4a5a..0d7a04109fb 100644 --- a/gateway/node.go +++ b/gateway/node.go @@ -146,6 +146,7 @@ type TargetAPI interface { Web3ClientVersion(ctx context.Context) (string, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) + EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) diff --git a/gateway/proxy_eth.go b/gateway/proxy_eth.go index 218cc189da8..eca6ae2bf41 100644 --- a/gateway/proxy_eth.go +++ b/gateway/proxy_eth.go @@ -621,6 +621,14 @@ func (gw *Node) EthTraceReplayBlockTransactions(ctx context.Context, blkNum stri return gw.target.EthTraceReplayBlockTransactions(ctx, blkNum, traceTypes) } +func (gw *Node) EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) { + if err := gw.limit(ctx, stateRateLimitTokens); err != nil { + return nil, err + } + + return gw.target.EthTraceTransaction(ctx, txHash) +} + var EthMaxFiltersPerConn = 16 // todo make this configurable func addUserFilterLimited(ctx context.Context, cb func() (ethtypes.EthFilterID, error)) (ethtypes.EthFilterID, error) { diff --git 
a/gen/inlinegen-data.json b/gen/inlinegen-data.json index 70c8fff61f4..fd5569436c3 100644 --- a/gen/inlinegen-data.json +++ b/gen/inlinegen-data.json @@ -1,7 +1,7 @@ { - "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], - "latestActorsVersion": 13, + "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], + "latestActorsVersion": 14, - "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], - "latestNetworkVersion": 22 + "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], + "latestNetworkVersion": 23 } diff --git a/go.mod b/go.mod index cc63a9c3852..76d5bb8c478 100644 --- a/go.mod +++ b/go.mod @@ -12,12 +12,9 @@ require ( github.com/DataDog/zstd v1.4.5 github.com/GeertJohan/go.rice v1.0.3 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee - github.com/KarpelesLab/reflink v1.0.1 github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 - github.com/buger/goterm v1.0.3 - github.com/charmbracelet/lipgloss v0.10.0 github.com/chzyer/readline v1.5.1 github.com/containerd/cgroups v1.1.0 github.com/coreos/go-systemd/v22 v22.5.0 @@ -32,7 +29,6 @@ require ( github.com/elastic/gosigar v0.14.2 github.com/etclabscore/go-openrpc-reflect v0.0.36 github.com/fatih/color v1.15.0 - github.com/filecoin-project/dagstore v0.5.2 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20220519234331-bfd1f5f9fe38 github.com/filecoin-project/go-address v1.1.0 github.com/filecoin-project/go-amt-ipld/v4 v4.3.0 @@ -41,15 +37,12 @@ require ( github.com/filecoin-project/go-commp-utils v0.1.3 github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 github.com/filecoin-project/go-crypto v0.0.1 - github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc8 github.com/filecoin-project/go-fil-commcid v0.1.0 - 
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 - github.com/filecoin-project/go-fil-markets v1.28.3 github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 - github.com/filecoin-project/go-jsonrpc v0.3.1 + github.com/filecoin-project/go-jsonrpc v0.3.2 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.4 - github.com/filecoin-project/go-state-types v0.13.3 + github.com/filecoin-project/go-state-types v0.14.0-dev github.com/filecoin-project/go-statemachine v1.0.3 github.com/filecoin-project/go-statestore v0.2.0 github.com/filecoin-project/go-storedcounter v0.1.0 @@ -65,7 +58,7 @@ require ( github.com/filecoin-project/test-vectors/schema v0.0.7 github.com/gbrlsnchs/jwt/v3 v3.0.1 github.com/gdamore/tcell/v2 v2.2.0 - github.com/georgysavva/scany/v2 v2.0.0 + github.com/georgysavva/scany/v2 v2.1.3 github.com/go-openapi/spec v0.19.11 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.6.0 @@ -91,24 +84,18 @@ require ( github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-ds-measure v0.2.0 github.com/ipfs/go-fs-lock v0.0.7 - github.com/ipfs/go-graphsync v0.17.0 - github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipld-cbor v0.1.0 github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 - github.com/ipfs/go-unixfsnode v1.9.0 github.com/ipld/go-car v0.6.2 github.com/ipld/go-car/v2 v2.13.1 - github.com/ipld/go-codec-dagpb v1.6.0 github.com/ipld/go-ipld-prime v0.21.0 - github.com/ipld/go-ipld-selector-text-lite v0.0.1 github.com/ipni/go-libipni v0.0.8 - github.com/ipni/index-provider v0.12.0 - github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa - github.com/jackc/pgx/v5 v5.4.1 + github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 github.com/kelseyhightower/envconfig v1.4.0 + github.com/klauspost/compress v1.17.8 github.com/koalacxr/quantile v0.0.1 github.com/libp2p/go-buffer-pool v0.1.0 
github.com/libp2p/go-libp2p v0.34.1 @@ -122,17 +109,14 @@ require ( github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 - github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.12.4 github.com/multiformats/go-multiaddr-dns v0.3.1 - github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 github.com/multiformats/go-varint v0.0.7 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 - github.com/pkg/errors v0.9.1 github.com/polydawn/refmt v0.89.0 github.com/prometheus/client_golang v1.19.1 github.com/puzpuzpuz/xsync/v2 v2.4.0 @@ -161,12 +145,10 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.23.0 - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/net v0.25.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.20.0 golang.org/x/term v0.20.0 - golang.org/x/text v0.15.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.21.0 golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 @@ -181,12 +163,9 @@ require ( github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/akavel/rsrc v0.8.0 // indirect - github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bep/debounce v1.2.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -205,7 +184,6 @@ require ( github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect 
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect - github.com/filecoin-project/go-ds-versioning v0.1.2 // indirect github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect github.com/flynn/noise v1.1.0 // indirect @@ -228,12 +206,11 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 // indirect - github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect + github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/orderedmap v0.1.0 // indirect - github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-blockservice v0.5.2 // indirect github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect @@ -242,32 +219,28 @@ require ( github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect github.com/ipfs/go-ipld-legacy v0.2.1 // indirect - github.com/ipfs/go-libipfs v0.7.0 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-merkledag v0.11.0 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect github.com/ipfs/go-verifcid v0.0.3 // indirect - github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 // indirect + github.com/ipld/go-codec-dagpb v1.6.0 // indirect github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect + github.com/jackc/pgx/v5 v5.4.1 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect - 
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect github.com/jessevdk/go-flags v1.4.0 // indirect github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/jpillora/backoff v1.0.0 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect - github.com/klauspost/compress v1.17.8 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect @@ -282,11 +255,11 @@ require ( github.com/miekg/dns v1.1.59 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/muesli/reflow v0.3.0 // indirect - github.com/muesli/termenv v0.15.2 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect github.com/nikkolasg/hexjson v0.1.0 // indirect github.com/nkovacs/streamquote v1.0.0 // indirect @@ -311,6 +284,7 @@ require ( github.com/pion/transport/v2 v2.2.5 // indirect github.com/pion/turn/v2 v2.1.6 // indirect github.com/pion/webrtc/v3 v3.2.40 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.1 // 
indirect github.com/prometheus/common v0.53.0 // indirect @@ -325,7 +299,6 @@ require ( github.com/sirupsen/logrus v1.9.2 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/tidwall/gjson v1.14.4 // indirect - github.com/twmb/murmur3 v1.1.6 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.0.1 // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect @@ -343,7 +316,9 @@ require ( go.uber.org/dig v1.17.1 // indirect go.uber.org/mock v0.4.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/mod v0.17.0 // indirect + golang.org/x/text v0.15.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect google.golang.org/grpc v1.64.0 // indirect diff --git a/go.sum b/go.sum index 4de7bda1496..d1d53543052 100644 --- a/go.sum +++ b/go.sum @@ -36,13 +36,11 @@ contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxa contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git 
v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -60,7 +58,6 @@ github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K1 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa/go.mod h1:WUmMvh9wMtqj1Xhf1hf3kp9RvL+y6odtdYxpyZjb90U= @@ -73,17 +70,13 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 h1:T3+cD5fYvuH36h7EZq+TDpm+d8a6FSD4pQsbmuGGQ8o= @@ -97,20 +90,9 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8V github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/ardanlabs/darwin/v2 v2.0.0 
h1:XCisQMgQ5EG+ZvSEcADEo+pyfIMKyWAGnn5o2TgriYE= github.com/ardanlabs/darwin/v2 v2.0.0/go.mod h1:MubZ2e9DAYGaym0mClSOi183NYahrrfKxvSy1HMhoes= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= -github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -121,16 +103,10 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY= -github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= 
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo= github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4= github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= @@ -143,24 +119,17 @@ github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod 
h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/buger/goterm v1.0.3 h1:7V/HeAQHrzPk/U4BvyH2g9u+xbUW9nr4yRPyG59W4fM= -github.com/buger/goterm v1.0.3/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -168,9 +137,6 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 
v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s= -github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -183,15 +149,12 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/cockroach-go/v2 v2.2.0 h1:/5znzg5n373N/3ESjHF5SMLxiW4RKB05Ql//KWfeTFs= github.com/cockroachdb/cockroach-go/v2 v2.2.0/go.mod h1:u3MiKYGupPPjkn3ozknpMUpxPaNLTFWAya419/zv6eI= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= 
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= @@ -200,13 +163,10 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -215,7 +175,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= github.com/crackcomm/go-gitignore 
v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= @@ -225,7 +184,6 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= @@ -237,19 +195,14 @@ github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= 
github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= @@ -263,14 +216,9 @@ github.com/drand/kyber v1.3.0 h1:TVd7+xoRgKQ4Ck1viNLPFy6IWhuZM36Bq6zDXD8Asls= github.com/drand/kyber v1.3.0/go.mod h1:f+mNHjiGT++CuueBrpeMhFNdKZAsy0tu03bKq9D5LPA= github.com/drand/kyber-bls12381 v0.3.1 h1:KWb8l/zYTP5yrvKTgvhOrk2eNPscbMiUOIeWBnmUxGo= github.com/drand/kyber-bls12381 v0.3.1/go.mod h1:H4y9bLPu7KZA/1efDg+jtJ7emKx+ro3PU7/jWUVt140= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize 
v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/go-elasticsearch/v7 v7.14.0 h1:extp3jos/rwJn3J+lgbaGlwAgs0TVsIHme00GyNAyX4= github.com/elastic/go-elasticsearch/v7 v7.14.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-sysinfo v1.7.0 h1:4vVvcfi255+8+TyQ7TYUTEK3A+G8v5FLE+ZKYL1z1Dg= @@ -280,7 +228,6 @@ github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6 github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -289,14 +236,11 @@ github.com/etclabscore/go-jsonschema-walk v0.0.6 h1:DrNzoKWKd8f8XB5nFGBY00IcjakR github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= github.com/etclabscore/go-openrpc-reflect v0.0.36 h1:kSqNB2U8RVoW4si+4fsv13NGNkRAQ5j78zTUx1qiehk= github.com/etclabscore/go-openrpc-reflect v0.0.36/go.mod h1:0404Ky3igAasAOpyj1eESjstTyneBAIk5PgJFbK4s5E= -github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/filecoin-project/dagstore v0.5.2 h1:Nd6oXdnolbbVhpMpkYT5PJHOjQp4OBSntHpMV5pxj3c= -github.com/filecoin-project/dagstore v0.5.2/go.mod h1:mdqKzYrRBHf1pRMthYfMv3n37oOw0Tkx7+TxPt240M0= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v1.1.0 h1:ofdtUtEsNxkIxkDw67ecSmvtzaVSdcea4boAmLbnHfE= @@ -324,17 +268,9 @@ github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc8 h1:EWC89lM/tJAjyzaxZ624clq3oyHLoLjISfoyG+WIu9s= -github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc8/go.mod h1:mK3/NbSljx3Kr335+IXEe8gcdEPA2eZXJaNhodK9bAI= -github.com/filecoin-project/go-ds-versioning v0.1.2 h1:to4pTadv3IeV1wvgbCbN6Vqd+fu+7tveXgv/rCEZy6w= -github.com/filecoin-project/go-ds-versioning v0.1.2/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 
h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= -github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.28.3 h1:2cFu7tLZYrfNz4LnxjgERaVD7k5+Wwp0H76mnnTGPBk= -github.com/filecoin-project/go-fil-markets v1.28.3/go.mod h1:eryxo/oVgIxaR5g5CNr9PlvZOi+u/bak0IsPL/PT1hk= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -342,15 +278,13 @@ github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+ github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= -github.com/filecoin-project/go-jsonrpc v0.3.1 h1:qwvAUc5VwAkooquKJmfz9R2+F8znhiqcNHYjEp/NM10= -github.com/filecoin-project/go-jsonrpc v0.3.1/go.mod h1:jBSvPTl8V1N7gSTuCR4bis8wnQnIjHbRPpROol6iQKM= +github.com/filecoin-project/go-jsonrpc v0.3.2 h1:uuAWTZe6B3AUUta+O26HlycGoej/yiaI1fXp3Du+D3I= +github.com/filecoin-project/go-jsonrpc v0.3.2/go.mod h1:jBSvPTl8V1N7gSTuCR4bis8wnQnIjHbRPpROol6iQKM= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1/go.mod 
h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-paramfetch v0.0.4 h1:H+Me8EL8T5+79z/KHYQQcT8NVOzYVqXIi7nhb48tdm8= github.com/filecoin-project/go-paramfetch v0.0.4/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= -github.com/filecoin-project/go-retrieval-types v1.2.0 h1:fz6DauLVP3GRg7UuW7HZ6sE+GTmaUW70DTXBF1r9cK0= -github.com/filecoin-project/go-retrieval-types v1.2.0/go.mod h1:ojW6wSw2GPyoRDBGqw1K6JxUcbfa5NOSIiyQEeh7KK0= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -358,9 +292,8 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.13.1/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= -github.com/filecoin-project/go-state-types v0.13.3 h1:9JPkC0E6HDtfHbaOIrFiCDzT/Z0jRTb9En4Y4Ci/b3w= -github.com/filecoin-project/go-state-types v0.13.3/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= -github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-state-types v0.14.0-dev h1:bDwq1S28D7EC/uDmKU8vvNcdFw/YDsNq09pe3zeV5h4= +github.com/filecoin-project/go-state-types v0.14.0-dev/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk= github.com/filecoin-project/go-statemachine v1.0.3/go.mod 
h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= @@ -391,15 +324,10 @@ github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftR github.com/filecoin-project/test-vectors/schema v0.0.7 h1:hhrcxLnQR2Oe6fjk63hZXG1fWQGyxgCVXOOlAlR/D9A= github.com/filecoin-project/test-vectors/schema v0.0.7/go.mod h1:WqdmeJrz0V37wp7DucRR/bvrScZffqaCyIk9G0BGw1o= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -413,8 +341,8 @@ github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdk github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= 
github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= -github.com/georgysavva/scany/v2 v2.0.0 h1:RGXqxDv4row7/FYoK8MRXAZXqoWF/NM+NP0q50k3DKU= -github.com/georgysavva/scany/v2 v2.0.0/go.mod h1:sigOdh+0qb/+aOs3TVhehVT10p8qJL7K/Zhyz8vWo38= +github.com/georgysavva/scany/v2 v2.1.3 h1:Zd4zm/ej79Den7tBSU2kaTDPAH64suq4qlQdhiBeGds= +github.com/georgysavva/scany/v2 v2.1.3/go.mod h1:fqp9yHZzM/PFVa3/rYEC57VmDx+KDch0LoqrJzkvtos= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -426,7 +354,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= @@ -437,7 +364,6 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -461,7 +387,6 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.8/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.11 h1:RFTu/dlFySpyVvJDfp/7674JY4SDglYWKztbiIGFpmc= github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -476,13 +401,10 @@ github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= 
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -491,9 +413,7 @@ github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -545,7 +465,6 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -554,7 +473,6 @@ 
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -569,10 +487,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 h1:velgFPYr1X9TDwLIfkV7fWqsFlf7TeP11M/7kPd/dVI= github.com/google/pprof v0.0.0-20240509144519-723abb6459b7/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -585,55 +501,32 @@ github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v1.17.2 
h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487 h1:NyaWOSkqFK1d9o+HLfnMIGzrHuUUPeBNIZyi5Zoe/lY= github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487/go.mod h1:a1eRkbhd3DYpRH2lnuUsVG+QMTI+v0hGnsis8C9hMrA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ= github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= -github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c h1:iiD+p+U0M6n/FsO6XIZuOgobnNa48FxtyYFfWwLttUQ= -github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -644,12 +537,7 @@ github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod 
h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -665,7 +553,6 @@ github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6 h1:8UsGZ2rr2ksmEru6lTo github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6/go.mod h1:xQig96I1VNBDIWGCdTt54nHt6EeI639SmHycLYL7FkA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= @@ -679,7 +566,6 @@ github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbG github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= github.com/ipfs/go-bitswap 
v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= @@ -688,19 +574,16 @@ github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WW github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-blockservice v0.5.2 h1:in9Bc+QcXwd1apOVM7Un9t8tixPKdaHQFdLSUM1Xgk8= github.com/ipfs/go-blockservice v0.5.2/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= -github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= @@ -709,13 +592,7 @@ 
github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= @@ -723,16 +600,11 @@ github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8 github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= -github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= -github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= 
github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= -github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= @@ -740,12 +612,7 @@ github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9 github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= -github.com/ipfs/go-graphsync v0.17.0 h1:1gh10v94G/vSGzfApVtbZSvSKkK906Y+2sRqewjDTm4= -github.com/ipfs/go-graphsync v0.17.0/go.mod h1:HXHiTRIw3wrN3InMwdV+IzpBAtreEf/KqFlEibhfVgo= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= -github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod 
h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= @@ -758,30 +625,22 @@ github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1Y github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-interface v0.2.1 h1:jMzo2VhLKSHbVe+mHNzYgs95n0+t0Q69GQ5WhRDZV/s= github.com/ipfs/go-ipfs-exchange-interface v0.2.1/go.mod h1:MUsYn6rKbG6CTtsDp+lKJPmVt3ZrCViNyH3rfPGsZ2E= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ= -github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiStR2P6sn9tIM= 
-github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= -github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= @@ -798,29 +657,19 @@ github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopo github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= -github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= -github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= github.com/ipfs/go-ipld-legacy v0.2.1 
h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= -github.com/ipfs/go-libipfs v0.7.0 h1:Mi54WJTODaOL2/ZSm5loi3SwI3jI2OuFWUrQIkJ5cpM= -github.com/ipfs/go-libipfs v0.7.0/go.mod h1:KsIf/03CqhICzyRGyGo68tooiBE2iFbI/rXW7FhAYr0= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= -github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= -github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= @@ -830,8 +679,6 @@ github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-merkledag v0.2.3/go.mod 
h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-merkledag v0.11.0 h1:DgzwK5hprESOzS4O1t/wi6JDpyVQdvm9Bs59N/jqfBY= github.com/ipfs/go-merkledag v0.11.0/go.mod h1:Q4f/1ezvBiJV0YCIXvt51W/9/kqJGH4I1LsA7+djsM4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= @@ -839,12 +686,9 @@ github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= -github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= github.com/ipfs/go-unixfsnode v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= @@ -853,38 +697,23 @@ github.com/ipfs/go-verifcid v0.0.3/go.mod h1:gcCtGniVzelKrbk9ooUSX/pM3xlH73fZZJD github.com/ipld/go-car v0.1.0/go.mod 
h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= -github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= -github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= -github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= -github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 h1:QAI/Ridj0+foHD6epbxmB4ugxz9B4vmNdYSmQLGa05E= -github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0/go.mod h1:odxGcpiQZLzP5+yGu84Ljo8y3EzCvNAQKEodHNsHLXA= github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= -github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= -github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= -github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= 
github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= -github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= -github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipni/go-libipni v0.0.8 h1:0wLfZRSBG84swmZwmaLKul/iB/FlBkkl9ZcR1ub+Z+w= github.com/ipni/go-libipni v0.0.8/go.mod h1:paYP9U4N3/vOzGCuN9kU972vtvw9JUcQjOKyiCFGwRk= -github.com/ipni/index-provider v0.12.0 h1:R3F6dxxKNv4XkE4GJZNLOG0bDEbBQ/S5iztXwSD8jhQ= -github.com/ipni/index-provider v0.12.0/go.mod h1:GhyrADJp7n06fqoc1djzkvL4buZYHzV8SoWrlxEo5F4= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= -github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw= -github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 h1:Dj0L5fhJ9F82ZJyVOmBx6msDp/kfd1t9GRfny/mfJA0= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile 
v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= @@ -899,7 +728,6 @@ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7Bd github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= -github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= @@ -912,24 +740,19 @@ github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0 github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -940,7 +763,6 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= 
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= @@ -956,7 +778,6 @@ github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8 github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= @@ -967,7 +788,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -988,7 +808,6 @@ github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod 
h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -996,68 +815,23 @@ github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QT github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= -github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= -github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= -github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= -github.com/libp2p/go-libp2p v0.7.4/go.mod 
h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= -github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.34.1 h1:fxn9vyLo7vJcXQRNvdRbyPjbzuQgi2UiqC8hEbn8a18= github.com/libp2p/go-libp2p v0.34.1/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= -github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= -github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= -github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= -github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= 
github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= -github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= -github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= -github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= -github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= -github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= -github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.5/go.mod 
h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qkCnjyaZUPYU= -github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= @@ -1065,145 +839,61 @@ github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEH github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= -github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= -github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= 
-github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= -github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-pubsub v0.11.0 h1:+JvS8Kty0OiyUiN0i8H5JbaCgjnJTRnTHe4rU88dLFc= github.com/libp2p/go-libp2p-pubsub v0.11.0/go.mod h1:QEb+hEV9WL9wCiUAnpY29FZR6W3zK8qYlaml8R4q6gQ= -github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY= 
github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= -github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= -github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= -github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= 
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= -github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= -github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= -github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= github.com/libp2p/go-maddr-filter v0.1.0 h1:4ACqZKw8AqiuJfwFGq1CYDFugfXTOos+qQ3DETkhtCE= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex 
v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= -github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= -github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= -github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= -github.com/libp2p/go-openssl v0.0.2/go.mod 
h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= 
-github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod 
h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magik6k/reflink v1.0.2-patch1 h1:NXSgQugcESI8Z/jBtuAI83YsZuRauY9i9WOyOnJ7Vns= -github.com/magik6k/reflink v1.0.2-patch1/go.mod h1:WGkTOKNjd1FsJKBw3mu4JvrPEDJyJJ+JPtxBkbPoCok= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1213,9 +903,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= -github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod 
h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -1225,7 +912,6 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -1233,20 +919,15 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= @@ -1266,14 +947,8 @@ github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan 
v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1286,10 +961,6 @@ github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= -github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= @@ -1300,38 +971,23 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.0/go.mod 
h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= -github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= -github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= -github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net 
v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= -github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= @@ -1343,31 +999,19 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= 
github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= 
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= @@ -1378,28 +1022,21 @@ github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOW github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod 
h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1407,30 +1044,18 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0 
h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB 
v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= @@ -1479,7 +1104,6 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -1489,15 +1113,11 @@ github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXx github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -1505,20 +1125,15 @@ github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5 github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 
h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= @@ -1528,10 +1143,8 @@ github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod 
h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -1552,29 +1165,22 @@ github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4= github.com/sercand/kuberesolver/v4 v4.0.0/go.mod h1:F4RGyuRmMAjeXHKL+w4P7AwUnPceEAPAhxUgXZjKgvM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1607,7 +1213,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1622,9 +1227,6 @@ 
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:s github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= @@ -1634,16 +1236,10 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= 
-github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -1681,21 +1277,15 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed h1:C8H2ql+vCBhEi7d3vMBBbdCAKv9s/thfPyLEuSvFpMU= github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= -github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= -github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod 
h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc= github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -1704,7 +1294,6 @@ github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8W github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= @@ -1725,13 +1314,11 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:x github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= -github.com/whyrusleeping/cbor-gen 
v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -1743,23 +1330,19 @@ github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= github.com/whyrusleeping/mafmt v1.2.8/go.mod 
h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= @@ -1786,42 +1369,31 @@ go.dedis.ch/fixbuf v1.0.3 
h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= go.opentelemetry.io/otel/bridge/opencensus v0.39.0 h1:YHivttTaDhbZIHuPlg1sWsy2P5gj57vzqPfkHItgbwQ= go.opentelemetry.io/otel/bridge/opencensus v0.39.0/go.mod 
h1:vZ4537pNjFDXEx//WldAR6Ro2LC8wwmFC76njAXwNPE= go.opentelemetry.io/otel/exporters/jaeger v1.14.0 h1:CjbUNd4iN2hHmWekmOqZ+zSCU+dzZppG8XsV+A3oc8Q= go.opentelemetry.io/otel/exporters/jaeger v1.14.0/go.mod h1:4Ay9kk5vELRrbg5z4cpP9EtmQRFap2Wb0woPG4lujZA= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1832,25 +1404,20 @@ go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= go.uber.org/fx v1.21.1/go.mod 
h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -1862,7 +1429,6 @@ go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEb golang.org/x/build 
v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1875,22 +1441,14 @@ golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= @@ -1906,7 +1464,6 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -1914,8 +1471,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= -golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -1933,12 +1488,10 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -1946,19 +1499,15 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1973,9 +1522,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1997,9 +1544,7 @@ golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -2045,13 +1590,10 @@ golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2062,14 +1604,12 @@ golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2079,14 +1619,12 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2112,18 +1650,12 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2194,7 +1726,6 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2214,17 +1745,13 @@ golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2245,7 +1772,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -2263,7 +1789,6 @@ gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2297,7 +1822,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn 
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -2332,23 +1856,17 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= @@ -2364,7 +1882,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= @@ -2375,20 +1892,13 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2413,7 +1923,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod 
h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= @@ -2423,7 +1932,5 @@ lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/itests/api_test.go b/itests/api_test.go index ff43bd5c02e..e3a41256e34 100644 --- a/itests/api_test.go +++ b/itests/api_test.go @@ -74,7 +74,7 @@ func (ts *apiSuite) testVersion(t *testing.T) { versions := strings.Split(v.Version, "+") require.NotZero(t, len(versions), "empty version") - require.Equal(t, versions[0], build.BuildVersion) + require.Equal(t, versions[0], build.NodeBuildVersion) } func (ts *apiSuite) testID(t *testing.T) { @@ -116,11 +116,11 @@ func (ts *apiSuite) testConnectTwo(t *testing.T) { return len(peerIDs) } - require.Equal(t, countPeerIDs(peers), 2, "node one doesn't have 2 peers") + require.Equal(t, countPeerIDs(peers), 1, "node one doesn't have 1 peer") peers, err = two.NetPeers(ctx) require.NoError(t, err) - require.Equal(t, countPeerIDs(peers), 2, "node one doesn't have 2 peers") + require.Equal(t, countPeerIDs(peers), 1, "node one doesn't have 
1 peer") } func (ts *apiSuite) testSearchMsg(t *testing.T) { diff --git a/itests/batch_deal_test.go b/itests/batch_deal_test.go deleted file mode 100644 index 21db9f08d0e..00000000000 --- a/itests/batch_deal_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "fmt" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/pipeline/sealiface" -) - -func TestBatchDealInput(t *testing.T) { - //stm: @MINER_SECTOR_STATUS_001, @MINER_SECTOR_LIST_001 - kit.QuietMiningLogs() - - var ( - blockTime = 10 * time.Millisecond - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - dealStartEpoch = abi.ChainEpoch(2 << 12) - ) - - run := func(piece, deals, expectSectors int) func(t *testing.T) { - return func(t *testing.T) { - t.Logf("batchtest start") - - ctx := context.Background() - - publishPeriod := 10 * time.Second - maxDealsPerMsg := uint64(deals) - - // Set max deals per publish deals message to maxDealsPerMsg - opts := kit.ConstructorOpts(node.Options( - node.Override( - new(*storageadapter.DealPublisher), - storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ - Period: publishPeriod, - MaxDealsPerMsg: maxDealsPerMsg, - })), - node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { - return func() (sealiface.Config, error) { - cfg := 
config.DefaultStorageMiner() - sc := modules.ToSealingConfig(cfg.Dealmaking, cfg.Sealing) - sc.MaxWaitDealsSectors = 2 - sc.MaxSealingSectors = 1 - sc.MaxSealingSectorsForDeals = 3 - sc.AlwaysKeepUnsealedCopy = true - sc.WaitDealsDelay = time.Hour - sc.AggregateCommits = false - - return sc, nil - }, nil - }), - )) - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts, kit.ThroughRPC()) - ens.InterconnectAll().BeginMining(blockTime) - dh := kit.NewDealHarness(t, client, miner, miner) - - err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30) - require.NoError(t, err) - - t.Logf("batchtest ask set") - - checkNoPadding := func() { - sl, err := miner.SectorsListNonGenesis(ctx) - require.NoError(t, err) - - sort.Slice(sl, func(i, j int) bool { - return sl[i] < sl[j] - }) - - for _, snum := range sl { - si, err := miner.SectorsStatus(ctx, snum, false) - require.NoError(t, err) - - // fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State) - - for _, deal := range si.Deals { - if deal == 0 { - fmt.Printf("sector %d had a padding piece!\n", snum) - } - } - } - } - - // Starts a deal and waits until it's published - runDealTillSeal := func(rseed int) { - res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece) - require.NoError(t, err) - - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.DealStartEpoch = dealStartEpoch - - deal := dh.StartDeal(ctx, dp) - dh.WaitDealSealed(ctx, deal, false, true, checkNoPadding) - } - - // Run maxDealsPerMsg deals in parallel - done := make(chan struct{}, maxDealsPerMsg) - for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ { - rseed := rseed - go func() { - runDealTillSeal(rseed) - done <- struct{}{} - }() - } - - t.Logf("batchtest deals started") - - // Wait for maxDealsPerMsg of the deals to be published - for i := 0; i < int(maxDealsPerMsg); i++ { - <-done - } - - t.Logf("batchtest deals published") - - checkNoPadding() - - t.Logf("batchtest no padding") - - sl, err := 
miner.SectorsListNonGenesis(ctx) - require.NoError(t, err) - require.Equal(t, len(sl), expectSectors) - - t.Logf("batchtest done") - } - } - - t.Run("4-p1600B", run(1600, 4, 4)) - t.Run("4-p513B", run(513, 4, 2)) -} diff --git a/itests/cli_test.go b/itests/cli_test.go deleted file mode 100644 index d2a0876356b..00000000000 --- a/itests/cli_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// stm: #integration -package itests - -import ( - "os" - "testing" - "time" - - "github.com/filecoin-project/lotus/cli/clicommands" - "github.com/filecoin-project/lotus/itests/kit" -) - -// TestClient does a basic test to exercise the client CLI commands. -func TestClient(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - _ = os.Setenv("BELLMAN_NO_GPU", "1") - kit.QuietMiningLogs() - - blockTime := 5 * time.Millisecond - client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) - ens.InterconnectAll().BeginMining(blockTime) - kit.RunClientTest(t, clicommands.Commands, client) -} diff --git a/itests/curio_test.go b/itests/curio_test.go deleted file mode 100644 index 997352dd359..00000000000 --- a/itests/curio_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package itests - -import ( - "context" - "testing" - "time" - - "github.com/docker/go-units" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/cli/spcli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/itests/kit" - 
"github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl" -) - -func TestCurioNewActor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - full, miner, esemble := kit.EnsembleMinimal(t, - kit.LatestActorsAt(-1), - kit.MockProofs(), - kit.WithSectorIndexDB(), - ) - - esemble.Start() - blockTime := 100 * time.Millisecond - esemble.BeginMining(blockTime) - - db := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB - - var titles []string - err := db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - require.NoError(t, err) - require.NotEmpty(t, titles) - require.NotContains(t, titles, "base") - - addr := miner.OwnerKey.Address - sectorSizeInt, err := units.RAMInBytes("8MiB") - require.NoError(t, err) - - maddr, err := spcli.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0) - require.NoError(t, err) - - err = deps.CreateMinerConfig(ctx, full, db, []string{maddr.String()}, "FULL NODE API STRING") - require.NoError(t, err) - - err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - require.NoError(t, err) - require.Contains(t, titles, "base") - baseCfg := config.DefaultCurioConfig() - var baseText string - - err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText) - require.NoError(t, err) - _, err = deps.LoadConfigWithUpgrades(baseText, baseCfg) - require.NoError(t, err) - - require.NotNil(t, baseCfg.Addresses) - require.GreaterOrEqual(t, len(baseCfg.Addresses), 1) - - require.Contains(t, baseCfg.Addresses[0].MinerAddresses, maddr.String()) -} diff --git a/itests/deals_512mb_test.go b/itests/deals_512mb_test.go deleted file mode 100644 index 7b55204d910..00000000000 --- a/itests/deals_512mb_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - 
"github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/itests/kit" -) - -func TestStorageDealMissingBlock(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 - ctx := context.Background() - - kit.QuietMiningLogs() - - client, miner, ens := kit.EnsembleMinimal(t, - kit.MockProofs(), - kit.SectorSize(512<<20), // 512MiB sectors. - ) - ens.InterconnectAll().BeginMining(50 * time.Millisecond) - - dh := kit.NewDealHarness(t, client, miner, miner) - - client.WaitTillChain(ctx, kit.HeightAtLeast(5)) - - res, _ := client.CreateImportFile(ctx, 0, 64<<20) // 64MiB file. - list, err := client.ClientListImports(ctx) - require.NoError(t, err) - require.Len(t, list, 1) - require.Equal(t, res.Root, *list[0].Root) - - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.FastRetrieval = true - dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. 
- deal := dh.StartDeal(ctx, dp) - - dh.WaitDealSealed(ctx, deal, false, false, nil) -} diff --git a/itests/deals_anycid_test.go b/itests/deals_anycid_test.go deleted file mode 100644 index c17441090b1..00000000000 --- a/itests/deals_anycid_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package itests - -import ( - "bufio" - "context" - "os" - "testing" - "time" - - dag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - ipldcbor "github.com/ipfs/go-ipld-cbor" - format "github.com/ipfs/go-ipld-format" - "github.com/ipld/go-car" - "github.com/ipld/go-car/v2/blockstore" - selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -func TestDealRetrieveByAnyCid(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - ctx := context.Background() - - kit.QuietMiningLogs() - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - startEpoch := abi.ChainEpoch(2 << 12) - - // Override the dependency injection for the blockstore accessor, so that - // we can get a reference to the blockstore containing our deal later in - // the test - var bsa storagemarket.BlockstoreAccessor - bsaFn := func(importmgr dtypes.ClientImportMgr) storagemarket.BlockstoreAccessor { - bsa = modules.StorageBlockstoreAccessor(importmgr) - return bsa - } - bsaOpt := kit.ConstructorOpts(node.Override(new(storagemarket.BlockstoreAccessor), bsaFn)) - - // Allow 8MB sectors - eightMBSectorsOpt := kit.SectorSize(8 << 20) - 
- // Create a client, and a miner with its own full node - _, client, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs(), bsaOpt, eightMBSectorsOpt) - ens.InterconnectAll().BeginMining(250 * time.Millisecond) - - dh := kit.NewDealHarness(t, client, miner, miner) - - // Generate a DAG with multiple levels, so that we can test the case where - // the client requests a CID for a block which is not the root block but - // does have a subtree below it in the DAG - dagOpts := kit.GeneratedDAGOpts{ - // Max size of a block - ChunkSize: 1024, - // Max links from a block to other blocks - Maxlinks: 10, - } - carv1FilePath, _ := kit.CreateRandomCARv1(t, 5, 100*1024, dagOpts) - res, err := client.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true}) - require.NoError(t, err) - - // Get the blockstore for the file - bs, err := bsa.Get(res.Root) - require.NoError(t, err) - - // Get all CIDs from the file - sc := car.NewSelectiveCar(ctx, bs, []car.Dag{{Root: res.Root, Selector: selectorparse.CommonSelector_ExploreAllRecursively}}) - prepared, err := sc.Prepare() - require.NoError(t, err) - - reg := format.Registry{} - reg.Register(cid.DagProtobuf, dag.DecodeProtobufBlock) - reg.Register(cid.DagCBOR, ipldcbor.DecodeBlock) - reg.Register(cid.Raw, dag.DecodeRawBlock) - - cids := prepared.Cids() - for i, c := range cids { - blk, err := bs.Get(ctx, c) - require.NoError(t, err) - - nd, err := reg.Decode(blk) - require.NoError(t, err) - - t.Log(i, c, len(nd.Links())) - } - - // Create a storage deal - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.DealStartEpoch = startEpoch - dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price - dealCid := dh.StartDeal(ctx, dp) - - // Wait for the deal to be sealed - dh.WaitDealSealed(ctx, dealCid, false, false, nil) - - ask, err := miner.MarketGetRetrievalAsk(ctx) - require.NoError(t, err) - ask.PricePerByte = abi.NewTokenAmount(0) - ask.UnsealPrice = abi.NewTokenAmount(0) - err = 
miner.MarketSetRetrievalAsk(ctx, ask) - require.NoError(t, err) - - // Fetch the deal data - info, err := client.ClientGetDealInfo(ctx, *dealCid) - require.NoError(t, err) - - // Make retrievals against CIDs at different levels in the DAG - cidIndices := []int{1, 11, 27, 32, 47} - for _, val := range cidIndices { - t.Logf("performing retrieval for cid at index %d", val) - - targetCid := cids[val] - offer, err := client.ClientMinerQueryOffer(ctx, miner.ActorAddr, targetCid, &info.PieceCID) - require.NoError(t, err) - require.Empty(t, offer.Err) - - // retrieve in a CAR file and ensure roots match - outputCar := dh.PerformRetrieval(ctx, dealCid, targetCid, true, offer) - _, err = os.Stat(outputCar) - require.NoError(t, err) - f, err := os.Open(outputCar) - require.NoError(t, err) - ch, err := car.ReadHeader(bufio.NewReader(f)) - require.NoError(t, err) - require.EqualValues(t, ch.Roots[0], targetCid) - require.NoError(t, f.Close()) - - // create CAR from original file starting at targetCid and ensure it matches the retrieved CAR file. 
- tmp, err := os.CreateTemp(t.TempDir(), "randcarv1") - require.NoError(t, err) - rd, err := blockstore.OpenReadOnly(carv1FilePath, blockstore.UseWholeCIDs(true)) - require.NoError(t, err) - err = car.NewSelectiveCar( - ctx, - rd, - []car.Dag{{ - Root: targetCid, - Selector: selectorparse.CommonSelector_ExploreAllRecursively, - }}, - ).Write(tmp) - require.NoError(t, err) - require.NoError(t, tmp.Close()) - require.NoError(t, rd.Close()) - - kit.AssertFilesEqual(t, tmp.Name(), outputCar) - t.Log("car files match") - } -} diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go deleted file mode 100644 index a106836bdd1..00000000000 --- a/itests/deals_concurrent_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - provider "github.com/ipni/index-provider" - "github.com/stretchr/testify/require" - - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" -) - -// TestDealWithMarketAndMinerNode is running concurrently a number of storage and retrieval deals towards a miner -// architecture where the `mining/sealing/proving` node is a separate process from the `markets` node -func TestDealWithMarketAndMinerNode(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, 
@CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - if testing.Short() { - t.Skip("skipping test in short mode") - } - - t.Skip("skipping due to flakiness: see #6956") - - kit.QuietMiningLogs() - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - startEpoch := abi.ChainEpoch(8 << 10) - - runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { - api.RunningNodeType = api.NodeMiner // TODO(anteva): fix me - - idxProv := shared_testutil.NewMockIndexProvider() - idxProvOpt := kit.ConstructorOpts(node.Override(new(provider.Interface), idxProv)) - client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC(), idxProvOpt) - - dh := kit.NewDealHarness(t, client, main, market) - - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ - N: n, - FastRetrieval: fastRetrieval, - CarExport: carExport, - StartEpoch: startEpoch, - IndexProvider: idxProv, - }) - } - - // this test is expensive because we don't use mock proofs; do a single cycle. 
- cycles := []int{4} - for _, n := range cycles { - n := n - ns := fmt.Sprintf("%d", n) - t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) }) - t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) - t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) - t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) - } -} - -func TestDealCyclesConcurrent(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - if testing.Short() { - t.Skip("skipping test in short mode") - } - - kit.QuietMiningLogs() - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - startEpoch := abi.ChainEpoch(2 << 12) - - runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) - ens.InterconnectAll().BeginMining(250 * time.Millisecond) - dh := kit.NewDealHarness(t, client, miner, miner) - - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ - N: n, - FastRetrieval: fastRetrieval, - CarExport: carExport, - StartEpoch: startEpoch, - }) - } - - // this test is cheap because we use mock proofs, do various cycles - cycles := []int{2, 4, 8, 16} - for _, n := range cycles { - n := n - ns := fmt.Sprintf("%d", n) - t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, 
true) }) - t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) - t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) - t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) - } -} - -func TestSimultanenousTransferLimit(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - t.Skip("skipping as flaky #7152") - - if testing.Short() { - t.Skip("skipping test in short mode") - } - - kit.QuietMiningLogs() - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - startEpoch := abi.ChainEpoch(2 << 12) - - const ( - graphsyncThrottle = 2 - concurrency = 20 - ) - runTest := func(t *testing.T) { - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( - node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle, 0, graphsyncThrottle))), - node.Override(new(dtypes.Graphsync), modules.Graphsync(graphsyncThrottle, graphsyncThrottle)), - )) - ens.InterconnectAll().BeginMining(250 * time.Millisecond) - dh := kit.NewDealHarness(t, client, miner, miner) - - ctx, cancel := context.WithCancel(context.Background()) - - du, err := miner.MarketDataTransferUpdates(ctx) - require.NoError(t, err) - - var maxOngoing int - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - - ongoing := map[datatransfer.TransferID]struct{}{} - - for { - select { - case u := <-du: - t.Logf("%d - %s", u.TransferID, 
datatransfer.Statuses[u.Status]) - if u.Status == datatransfer.Ongoing && u.Transferred > 0 { - ongoing[u.TransferID] = struct{}{} - } else { - delete(ongoing, u.TransferID) - } - - if len(ongoing) > maxOngoing { - maxOngoing = len(ongoing) - } - case <-ctx.Done(): - return - } - } - }() - - t.Logf("running concurrent deals: %d", concurrency) - - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ - N: concurrency, - FastRetrieval: true, - StartEpoch: startEpoch, - }) - - t.Logf("all deals finished") - - cancel() - wg.Wait() - - // The eventing systems across go-data-transfer and go-graphsync - // are racy, and that's why we can't enforce graphsyncThrottle exactly, - // without making this test racy. - // - // Essentially what could happen is that the graphsync layer starts the - // next transfer before the go-data-transfer FSM has the opportunity to - // move the previously completed transfer to the next stage, thus giving - // the appearance that more than graphsyncThrottle transfers are - // in progress. - // - // Concurrency (20) is x10 higher than graphsyncThrottle (2), so if all - // 20 transfers are not happening at once, we know the throttle is - // in effect. Thus we are a little bit lenient here to account for the - // above races and allow up to graphsyncThrottle*2. 
- require.LessOrEqual(t, maxOngoing, graphsyncThrottle*2) - } - - runTest(t) -} diff --git a/itests/deals_max_staging_deals_test.go b/itests/deals_max_staging_deals_test.go deleted file mode 100644 index 738a1e2fed3..00000000000 --- a/itests/deals_max_staging_deals_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/itests/kit" -) - -func TestMaxStagingDeals(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 - ctx := context.Background() - - kit.QuietMiningLogs() - - client, miner, ens := kit.EnsembleMinimal(t, - kit.MockProofs(), - kit.WithMaxStagingDealsBytes(8192), // max 8KB staging deals - kit.SectorSize(512<<20), // 512MiB sectors. 
- ) - ens.InterconnectAll().BeginMining(200 * time.Millisecond) - - dh := kit.NewDealHarness(t, client, miner, miner) - - client.WaitTillChain(ctx, kit.HeightAtLeast(5)) - - res, _ := client.CreateImportFile(ctx, 0, 8192) // 8KB file - list, err := client.ClientListImports(ctx) - require.NoError(t, err) - require.Len(t, list, 1) - - res2, _ := client.CreateImportFile(ctx, 0, 4096) - list, err = client.ClientListImports(ctx) - require.NoError(t, err) - require.Len(t, list, 2) - - // first deal stays in staging area, and is not yet passed to the sealing subsystem - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.FastRetrieval = true - dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. - deal := dh.StartDeal(ctx, dp) - - time.Sleep(1 * time.Second) - - // expecting second deal to fail since staging area is full - dp.Data.Root = res2.Root - dp.FastRetrieval = true - dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. - deal2 := dh.StartDeal(ctx, dp) - - _ = deal - - err = dh.ExpectDealFailure(ctx, deal2, "cannot accept deal as miner is overloaded at the moment") - if err != nil { - t.Fatal(err) - } -} diff --git a/itests/deals_offline_test.go b/itests/deals_offline_test.go deleted file mode 100644 index 997d7723aa6..00000000000 --- a/itests/deals_offline_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/require" - - commcid "github.com/filecoin-project/go-fil-commcid" - commp "github.com/filecoin-project/go-fil-commp-hashhash" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" -) - -func TestOfflineDealFlow(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, 
@CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @CLIENT_DATA_CALCULATE_COMMP_001, @CLIENT_DATA_GENERATE_CAR_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001 - runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) { - ctx := context.Background() - client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs - ens.InterconnectAll().BeginMining(250 * time.Millisecond) - - dh := kit.NewDealHarness(t, client, miner, miner) - - // Create a random file and import on the client. - res, inFile := client.CreateImportFile(ctx, 1, 200) - - // Get the piece size and commP - rootCid := res.Root - pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid) - require.NoError(t, err) - t.Log("FILE CID:", rootCid) - - // test whether padding works as intended - if upscale > 0 { - newRawCp, err := commp.PadCommP( - pieceInfo.PieceCID.Hash()[len(pieceInfo.PieceCID.Hash())-32:], - uint64(pieceInfo.PieceSize), - uint64(upscale), - ) - require.NoError(t, err) - - pieceInfo.PieceSize = upscale - pieceInfo.PieceCID, err = commcid.DataCommitmentV1ToCID(newRawCp) - require.NoError(t, err) - } - - dp := dh.DefaultStartDealParams() - dp.DealStartEpoch = abi.ChainEpoch(4 << 10) - dp.FastRetrieval = fastRet - // Replace with params for manual storage deal (offline deal) - dp.Data = &storagemarket.DataRef{ - TransferType: storagemarket.TTManual, - Root: rootCid, - PieceCid: &pieceInfo.PieceCID, - PieceSize: pieceInfo.PieceSize.Unpadded(), - } - - proposalCid := dh.StartDeal(ctx, dp) - - //stm: @CLIENT_STORAGE_DEALS_GET_001 - // Wait for the 
deal to reach StorageDealCheckForAcceptance on the client - cd, err := client.ClientGetDealInfo(ctx, *proposalCid) - require.NoError(t, err) - require.Eventually(t, func() bool { - cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) - return cd.State == storagemarket.StorageDealCheckForAcceptance - }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) - - // Create a CAR file from the raw file - carFileDir := t.TempDir() - carFilePath := filepath.Join(carFileDir, "out.car") - err = client.ClientGenCar(ctx, lapi.FileRef{Path: inFile}, carFilePath) - require.NoError(t, err) - - // Import the CAR file on the miner - this is the equivalent to - // transferring the file across the wire in a normal (non-offline) deal - err = miner.DealsImportData(ctx, *proposalCid, carFilePath) - require.NoError(t, err) - - // Wait for the deal to be published - dh.WaitDealPublished(ctx, proposalCid) - - t.Logf("deal published, retrieving") - - // Retrieve the deal - outFile := dh.PerformRetrieval(ctx, proposalCid, rootCid, false) - - kit.AssertFilesEqual(t, inFile, outFile) - - } - - t.Run("stdretrieval", func(t *testing.T) { runTest(t, false, 0) }) - t.Run("fastretrieval", func(t *testing.T) { runTest(t, true, 0) }) - t.Run("fastretrieval", func(t *testing.T) { runTest(t, true, 1024) }) -} diff --git a/itests/deals_padding_test.go b/itests/deals_padding_test.go deleted file mode 100644 index aaca4536069..00000000000 --- a/itests/deals_padding_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - commcid "github.com/filecoin-project/go-fil-commcid" - commp "github.com/filecoin-project/go-fil-commp-hashhash" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/itests/kit" -) - -func TestDealPadding(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: 
@CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @CLIENT_DATA_GET_DEAL_PIECE_CID_001 - kit.QuietMiningLogs() - - var blockTime = 250 * time.Millisecond - startEpoch := abi.ChainEpoch(2 << 12) - - client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs. - ens.InterconnectAll().BeginMining(blockTime) - dh := kit.NewDealHarness(t, client, miner, miner) - - ctx := context.Background() - client.WaitTillChain(ctx, kit.BlocksMinedByAll(miner.ActorAddr)) - - // Create a random file, would originally be a 256-byte sector - res, inFile := client.CreateImportFile(ctx, 1, 200) - - // Get the piece size and commP - pieceInfo, err := client.ClientDealPieceCID(ctx, res.Root) - require.NoError(t, err) - t.Log("FILE CID:", res.Root) - - runTest := func(t *testing.T, upscale abi.PaddedPieceSize) { - // test whether padding works as intended - newRawCp, err := commp.PadCommP( - pieceInfo.PieceCID.Hash()[len(pieceInfo.PieceCID.Hash())-32:], - uint64(pieceInfo.PieceSize), - uint64(upscale), - ) - require.NoError(t, err) - - pcid, err := commcid.DataCommitmentV1ToCID(newRawCp) - require.NoError(t, err) - - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.Data.PieceCid = &pcid - dp.Data.PieceSize = upscale.Unpadded() - dp.DealStartEpoch = startEpoch - proposalCid := dh.StartDeal(ctx, dp) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - - //stm: @CLIENT_STORAGE_DEALS_GET_001 - di, err := client.ClientGetDealInfo(ctx, 
*proposalCid) - require.NoError(t, err) - require.True(t, di.PieceCID.Equals(pcid)) - - dh.WaitDealSealed(ctx, proposalCid, false, false, nil) - - // Retrieve the deal - outFile := dh.PerformRetrieval(ctx, proposalCid, res.Root, false) - - kit.AssertFilesEqual(t, inFile, outFile) - } - - t.Run("padQuarterSector", func(t *testing.T) { runTest(t, 512) }) - t.Run("padHalfSector", func(t *testing.T) { runTest(t, 1024) }) - t.Run("padFullSector", func(t *testing.T) { runTest(t, 2048) }) -} diff --git a/itests/deals_partial_retrieval_dm-level_test.go b/itests/deals_partial_retrieval_dm-level_test.go deleted file mode 100644 index c03d07aac53..00000000000 --- a/itests/deals_partial_retrieval_dm-level_test.go +++ /dev/null @@ -1,267 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "fmt" - "io" - "os" - "testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - textselector "github.com/ipld/go-ipld-selector-text-lite" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - api0 "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/itests/kit" -) - -// please talk to @ribasushi or @mikeal before modifying these test: there are -// downstream dependencies on ADL-less operation -var ( - adlFixtureCar = "fixtures/adl_test.car" - adlFixtureRoot, _ = cid.Parse("bafybeiaigxwanoxyeuzyiknhrg6io6kobfbm37ozcips6qdwumub2gaomy") - adlFixtureCommp, _ = cid.Parse("baga6ea4seaqjnmnrv4qsfz2rnda54mvo5al22dwpguhn2pmep63gl7bbqqqraai") - adlFixturePieceSize = abi.PaddedPieceSize(1024) - dmSelector = api.Selector("Links/0/Hash") - dmTextSelector = textselector.Expression(dmSelector) - dmExpectedResult = "NO ADL" - dmExpectedCarBlockCount = 4 - dmDagSpec = []api.DagSpec{{DataSelector: &dmSelector, ExportMerkleProof: true}} -) - -func 
TestDMLevelPartialRetrieval(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @CLIENT_RETRIEVAL_RETRIEVE_001, @CLIENT_RETRIEVAL_FIND_001 - ctx := context.Background() - - kit.QuietMiningLogs() - client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) - dh := kit.NewDealHarness(t, client, miner, miner) - ens.InterconnectAll().BeginMiningMustPost(50 * time.Millisecond) - - _, err := client.ClientImport(ctx, api.FileRef{Path: adlFixtureCar, IsCAR: true}) - require.NoError(t, err) - - caddr, err := client.WalletDefaultAddress(ctx) - require.NoError(t, err) - - // - // test retrieval from local car 1st - require.NoError(t, testDMExportAsCar( - ctx, client, api.ExportRef{ - FromLocalCAR: adlFixtureCar, - Root: adlFixtureRoot, - DAGs: dmDagSpec, - }, t.TempDir(), - )) - require.NoError(t, testDMExportAsFile( - ctx, client, api.ExportRef{ - FromLocalCAR: adlFixtureCar, - Root: adlFixtureRoot, - DAGs: dmDagSpec, - }, t.TempDir(), - )) - - // - // ensure V0 continues functioning as expected - require.NoError(t, tesV0RetrievalAsCar( - ctx, client, api0.RetrievalOrder{ - FromLocalCAR: adlFixtureCar, - Root: adlFixtureRoot, - DatamodelPathSelector: &dmTextSelector, - }, t.TempDir(), - )) - require.NoError(t, testV0RetrievalAsFile( - ctx, client, api0.RetrievalOrder{ - FromLocalCAR: adlFixtureCar, - Root: adlFixtureRoot, - DatamodelPathSelector: &dmTextSelector, - }, t.TempDir(), - )) - - // - // now perform a storage/retrieval deal as well, and retest - dp := 
dh.DefaultStartDealParams() - dp.Data = &storagemarket.DataRef{ - Root: adlFixtureRoot, - PieceCid: &adlFixtureCommp, - PieceSize: adlFixturePieceSize.Unpadded(), - } - proposalCid := dh.StartDeal(ctx, dp) - - // Wait for the deal to reach StorageDealCheckForAcceptance on the client - cd, err := client.ClientGetDealInfo(ctx, *proposalCid) - require.NoError(t, err) - require.Eventually(t, func() bool { - cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) - return cd.State == storagemarket.StorageDealCheckForAcceptance - }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) - - dh.WaitDealSealed(ctx, proposalCid, false, false, nil) - - offers, err := client.ClientFindData(ctx, adlFixtureRoot, nil) - require.NoError(t, err) - require.NotEmpty(t, offers, "no offers") - - retOrder := offers[0].Order(caddr) - retOrder.DataSelector = &dmSelector - - rr, err := client.ClientRetrieve(ctx, retOrder) - require.NoError(t, err) - - err = client.ClientRetrieveWait(ctx, rr.DealID) - require.NoError(t, err) - - require.NoError(t, testDMExportAsCar( - ctx, client, api.ExportRef{ - DealID: rr.DealID, - Root: adlFixtureRoot, - DAGs: dmDagSpec, - }, t.TempDir(), - )) - require.NoError(t, testDMExportAsFile( - ctx, client, api.ExportRef{ - DealID: rr.DealID, - Root: adlFixtureRoot, - DAGs: dmDagSpec, - }, t.TempDir(), - )) - -} - -func testDMExportAsFile(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { - out := tempDir + string(os.PathSeparator) + "exp-test" + expDirective.Root.String() - - fileDest := api.FileRef{ - Path: out, - } - err := client.ClientExport(ctx, expDirective, fileDest) - if err != nil { - return err - } - - f, err := os.Open(out) - if err != nil { - return err - } - - defer f.Close() //nolint:errcheck - - return validateDMUnixFile(f) -} -func testV0RetrievalAsFile(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { - out := 
tempDir + string(os.PathSeparator) + "exp-test" + retOrder.Root.String() - - cv0 := &api0.WrapperV1Full{FullNode: client.FullNode} - err := cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ - Path: out, - }) - if err != nil { - return err - } - - f, err := os.Open(out) - if err != nil { - return err - } - - defer f.Close() //nolint:errcheck - - return validateDMUnixFile(f) -} -func validateDMUnixFile(r io.Reader) error { - data, err := io.ReadAll(r) - if err != nil { - return err - } - if string(data) != dmExpectedResult { - return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) - } - - return nil -} - -func testDMExportAsCar(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { - out, err := os.CreateTemp(tempDir, "exp-test") - if err != nil { - return err - } - defer out.Close() //nolint:errcheck - - carDest := api.FileRef{ - IsCAR: true, - Path: out.Name(), - } - err = client.ClientExport(ctx, expDirective, carDest) - if err != nil { - return err - } - - return validateDMCar(out) -} -func tesV0RetrievalAsCar(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { - out, err := os.CreateTemp(tempDir, "exp-test") - if err != nil { - return err - } - defer out.Close() //nolint:errcheck - - cv0 := &api0.WrapperV1Full{FullNode: client.FullNode} - err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ - Path: out.Name(), - IsCAR: true, - }) - if err != nil { - return err - } - - return validateDMCar(out) -} -func validateDMCar(r io.Reader) error { - cr, err := car.NewCarReader(r) - if err != nil { - return err - } - - if len(cr.Header.Roots) != 1 { - return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) - } else if cr.Header.Roots[0].String() != adlFixtureRoot.String() { - return fmt.Errorf("expected root cid '%s', got '%s'", adlFixtureRoot.String(), cr.Header.Roots[0].String()) - } - - blks := 
make([]blocks.Block, 0) - for { - b, err := cr.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - blks = append(blks, b) - } - - if len(blks) != dmExpectedCarBlockCount { - return fmt.Errorf("expected a car file with %d blocks, got one with %d instead", dmExpectedCarBlockCount, len(blks)) - } - - data := fmt.Sprintf("%s%s", blks[2].RawData(), blks[3].RawData()) - if data != dmExpectedResult { - return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) - } - - return nil -} diff --git a/itests/deals_partial_retrieval_test.go b/itests/deals_partial_retrieval_test.go deleted file mode 100644 index 0bbf23da054..00000000000 --- a/itests/deals_partial_retrieval_test.go +++ /dev/null @@ -1,256 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "fmt" - "io" - "os" - "testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" -) - -// use the mainnet carfile as text fixture: it will always be here -// https://dweb.link/ipfs/bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2/8/1/8/1/0/1/0 -var ( - sourceCar = "../build/genesis/mainnet.car" - carRoot, _ = cid.Parse("bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2") - carCommp, _ = cid.Parse("baga6ea4seaqmrivgzei3fmx5qxtppwankmtou6zvigyjaveu3z2zzwhysgzuina") - selectedCid, _ = cid.Parse("bafkqaetgnfwc6mjpon2g64tbm5sxa33xmvza") - carPieceSize = abi.PaddedPieceSize(2097152) - textSelector = api.Selector("8/1/8/1/0/1/0") - textSelectorNonLink = api.Selector("8/1/8/1/0/1") - textSelectorNonexistent = api.Selector("42") - expectedResult = 
"fil/1/storagepower" -) - -func TestPartialRetrieval(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @CLIENT_RETRIEVAL_RETRIEVE_001 - ctx := context.Background() - - kit.QuietMiningLogs() - client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MockProofs(), kit.SectorSize(512<<20)) - dh := kit.NewDealHarness(t, client, miner, miner) - ens.InterconnectAll().BeginMining(50 * time.Millisecond) - - _, err := client.ClientImport(ctx, api.FileRef{Path: sourceCar, IsCAR: true}) - require.NoError(t, err) - - caddr, err := client.WalletDefaultAddress(ctx) - require.NoError(t, err) - - // first test retrieval from local car, then do an actual deal - for _, exportMerkleProof := range []bool{false, true} { - for _, fullCycle := range []bool{false, true} { - - var retOrder api.RetrievalOrder - var eref api.ExportRef - - if !fullCycle { - eref.FromLocalCAR = sourceCar - } else { - dp := dh.DefaultStartDealParams() - dp.Data = &storagemarket.DataRef{ - // FIXME: figure out how to do this with an online partial transfer - TransferType: storagemarket.TTManual, - Root: carRoot, - PieceCid: &carCommp, - PieceSize: carPieceSize.Unpadded(), - } - proposalCid := dh.StartDeal(ctx, dp) - - // Wait for the deal to reach StorageDealCheckForAcceptance on the client - cd, err := client.ClientGetDealInfo(ctx, *proposalCid) - require.NoError(t, err) - require.Eventually(t, func() bool { - cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) - return cd.State == 
storagemarket.StorageDealCheckForAcceptance - }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) - - err = miner.DealsImportData(ctx, *proposalCid, sourceCar) - require.NoError(t, err) - - // Wait for the deal to be published, we should be able to start retrieval right away - dh.WaitDealPublished(ctx, proposalCid) - - offers, err := client.ClientFindData(ctx, carRoot, nil) - require.NoError(t, err) - require.NotEmpty(t, offers, "no offers") - - retOrder = offers[0].Order(caddr) - } - - retOrder.DataSelector = &textSelector - eref.DAGs = append(eref.DAGs, api.DagSpec{ - DataSelector: &textSelector, - ExportMerkleProof: exportMerkleProof, - }) - eref.Root = carRoot - - // test retrieval of either data or constructing a partial selective-car - for _, retrieveAsCar := range []bool{false, true} { - outFile := t.TempDir() + string(os.PathSeparator) + "ret-file" + retOrder.Root.String() - - require.NoError(t, testGenesisRetrieval( - ctx, - client, - retOrder, - eref, - &api.FileRef{ - Path: outFile, - IsCAR: retrieveAsCar, - }, - )) - - // UGH if I do not sleep here, I get things like: - /* - retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal: - github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve - /home/circleci/project/node/impl/client/client.go:774 - */ - time.Sleep(time.Second) - } - } - } - - // ensure non-existent paths fail - require.EqualError( - t, - testGenesisRetrieval( - ctx, - client, - api.RetrievalOrder{ - Root: carRoot, - DataSelector: &textSelectorNonexistent, - }, - api.ExportRef{ - Root: carRoot, - FromLocalCAR: sourceCar, - DAGs: []api.DagSpec{{DataSelector: 
&textSelectorNonexistent}}, - }, - &api.FileRef{}, - ), - fmt.Sprintf("parsing dag spec: path selection does not match a node within %s", carRoot), - ) - - // ensure non-boundary retrievals fail - require.EqualError( - t, - testGenesisRetrieval( - ctx, - client, - api.RetrievalOrder{ - Root: carRoot, - DataSelector: &textSelectorNonLink, - }, - api.ExportRef{ - Root: carRoot, - FromLocalCAR: sourceCar, - DAGs: []api.DagSpec{{DataSelector: &textSelectorNonLink}}, - }, - &api.FileRef{}, - ), - fmt.Sprintf("parsing dag spec: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink), - ) -} - -func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, eref api.ExportRef, retRef *api.FileRef) error { - - if retOrder.Total.Nil() { - retOrder.Total = big.Zero() - } - if retOrder.UnsealPrice.Nil() { - retOrder.UnsealPrice = big.Zero() - } - - if eref.FromLocalCAR == "" { - rr, err := client.ClientRetrieve(ctx, retOrder) - if err != nil { - return err - } - eref.DealID = rr.DealID - - if err := client.ClientRetrieveWait(ctx, rr.DealID); err != nil { - return xerrors.Errorf("retrieval wait: %w", err) - } - } - - err := client.ClientExport(ctx, eref, *retRef) - if err != nil { - return err - } - - outFile, err := os.Open(retRef.Path) - if err != nil { - return err - } - - defer outFile.Close() //nolint:errcheck - - var data []byte - if !retRef.IsCAR { - - data, err = io.ReadAll(outFile) - if err != nil { - return err - } - - } else { - - cr, err := car.NewCarReader(outFile) - if err != nil { - return err - } - - if len(cr.Header.Roots) != 1 { - return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) - } else if eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != carRoot.String() { - return fmt.Errorf("expected root cid '%s', got '%s'", carRoot.String(), cr.Header.Roots[0].String()) - } 
else if !eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != selectedCid.String() { - return fmt.Errorf("expected root cid '%s', got '%s'", selectedCid.String(), cr.Header.Roots[0].String()) - } - - blks := make([]blocks.Block, 0) - for { - b, err := cr.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - blks = append(blks, b) - } - - if (eref.DAGs[0].ExportMerkleProof && len(blks) != 3) || (!eref.DAGs[0].ExportMerkleProof && len(blks) != 1) { - return fmt.Errorf("expected a car file with 3/1 blocks, got one with %d instead", len(blks)) - } - - data = blks[len(blks)-1].RawData() - } - - if string(data) != expectedResult { - return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", expectedResult, data) - } - - return nil -} diff --git a/itests/deals_power_test.go b/itests/deals_power_test.go deleted file mode 100644 index 57483cde716..00000000000 --- a/itests/deals_power_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/lotus/itests/kit" -) - -func TestFirstDealEnablesMining(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - // test making a deal with a fresh miner, and see if it starts to mine. 
- if testing.Short() { - t.Skip("skipping test in short mode") - } - - kit.QuietMiningLogs() - - var ( - client kit.TestFullNode - genMiner kit.TestMiner // bootstrap - provider kit.TestMiner // no sectors, will need to create one - ) - - ens := kit.NewEnsemble(t, kit.MockProofs()) - ens.FullNode(&client) - ens.Miner(&genMiner, &client, kit.WithAllSubsystems()) - ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0)) - ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond) - - ctx := context.Background() - - dh := kit.NewDealHarness(t, &client, &provider, &provider) - - ref, _ := client.CreateImportFile(ctx, 5, 0) - - t.Log("FILE CID:", ref.Root) - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // start a goroutine to monitor head changes from the client - // once the provider has mined a block, thanks to the power acquired from the deal, - // we pass the test. - providerMined := make(chan struct{}) - - go func() { - _ = client.WaitTillChain(ctx, kit.BlocksMinedByAll(provider.ActorAddr)) - close(providerMined) - }() - - // now perform the deal. 
- dp := dh.DefaultStartDealParams() - dp.Data.Root = ref.Root - deal := dh.StartDeal(ctx, dp) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - - dh.WaitDealSealed(ctx, deal, false, false, nil) - - <-providerMined -} diff --git a/itests/deals_pricing_test.go b/itests/deals_pricing_test.go deleted file mode 100644 index f2301eee8dc..00000000000 --- a/itests/deals_pricing_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestQuotePriceForUnsealedRetrieval(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - var ( - ctx = context.Background() - blocktime = 50 * time.Millisecond - ) - - kit.QuietMiningLogs() - - client, miner, ens := kit.EnsembleMinimal(t) - ens.InterconnectAll().BeginMiningMustPost(blocktime) - - var ( - ppb = int64(1) - unsealPrice = int64(77) - ) - - // Set unsealed price to non-zero - ask, err := miner.MarketGetRetrievalAsk(ctx) - require.NoError(t, err) - ask.PricePerByte = abi.NewTokenAmount(ppb) - ask.UnsealPrice = abi.NewTokenAmount(unsealPrice) - err = miner.MarketSetRetrievalAsk(ctx, ask) - require.NoError(t, err) - - dh := kit.NewDealHarness(t, client, miner, miner) 
- - deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) - - // one more storage deal for the same data - _, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) - require.Equal(t, res1.Root, res2.Root) - - //stm: @CLIENT_STORAGE_DEALS_GET_001 - // Retrieval - dealInfo, err := client.ClientGetDealInfo(ctx, *deal1) - require.NoError(t, err) - - //stm: @CLIENT_RETRIEVAL_FIND_001 - // fetch quote -> zero for unsealed price since unsealed file already exists. - offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) - require.NoError(t, err) - require.Len(t, offers, 2) - require.Equal(t, offers[0], offers[1]) - require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64()) - require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) - - // remove ONLY one unsealed file - //stm: @STORAGE_LIST_001, @MINER_SECTOR_LIST_001 - ss, err := miner.StorageList(context.Background()) - require.NoError(t, err) - _, err = miner.SectorsListNonGenesis(ctx) - require.NoError(t, err) - - //stm: @STORAGE_DROP_SECTOR_001, @STORAGE_LIST_001 -iLoop: - for storeID, sd := range ss { - for _, sector := range sd { - err := miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed) - require.NoError(t, err) - break iLoop // remove ONLY one - } - } - - //stm: @CLIENT_RETRIEVAL_FIND_001 - // get retrieval quote -> zero for unsealed price as unsealed file exists. 
- offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) - require.NoError(t, err) - require.Len(t, offers, 2) - require.Equal(t, offers[0], offers[1]) - require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64()) - require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) - - // remove the other unsealed file as well - ss, err = miner.StorageList(context.Background()) - require.NoError(t, err) - _, err = miner.SectorsListNonGenesis(ctx) - require.NoError(t, err) - for storeID, sd := range ss { - for _, sector := range sd { - require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed)) - } - } - - //stm: @CLIENT_RETRIEVAL_FIND_001 - // fetch quote -> non-zero for unseal price as we no more unsealed files. - offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) - require.NoError(t, err) - require.Len(t, offers, 2) - require.Equal(t, offers[0], offers[1]) - require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64()) - total := (dealInfo.Size * uint64(ppb)) + uint64(unsealPrice) - require.Equal(t, total, offers[0].MinPrice.Uint64()) -} - -func TestZeroPricePerByteRetrieval(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - if testing.Short() { - t.Skip("skipping test in short mode") - } - - kit.QuietMiningLogs() - - var ( - blockTime = 10 * time.Millisecond - startEpoch = abi.ChainEpoch(2 << 12) - ) - - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) - ens.InterconnectAll().BeginMiningMustPost(blockTime) - - ctx := context.Background() - - ask, err := miner.MarketGetRetrievalAsk(ctx) - require.NoError(t, 
err) - - ask.PricePerByte = abi.NewTokenAmount(0) - err = miner.MarketSetRetrievalAsk(ctx, ask) - require.NoError(t, err) - - dh := kit.NewDealHarness(t, client, miner, miner) - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ - N: 1, - StartEpoch: startEpoch, - }) -} diff --git a/itests/deals_publish_test.go b/itests/deals_publish_test.go deleted file mode 100644 index 43f4eeb0500..00000000000 --- a/itests/deals_publish_test.go +++ /dev/null @@ -1,143 +0,0 @@ -// stm: #integration -package itests - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet/key" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -func TestPublishDealsBatching(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - var ( - ctx = context.Background() - publishPeriod = 10 * time.Second - maxDealsPerMsg = 
uint64(2) // Set max deals per publish deals message to 2 - startEpoch = abi.ChainEpoch(2 << 12) - ) - - kit.QuietMiningLogs() - - publisherKey, err := key.GenerateKey(types.KTSecp256k1) - require.NoError(t, err) - - opts := node.Options( - node.Override(new(*storageadapter.DealPublisher), - storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ - Period: publishPeriod, - MaxDealsPerMsg: maxDealsPerMsg, - }), - ), - node.Override(new(*ctladdr.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ - DealPublishControl: []string{ - publisherKey.Address.String(), - }, - DisableOwnerFallback: true, - DisableWorkerFallback: true, - })), - ) - - client, miner, ens := kit.EnsembleMinimal(t, kit.Account(publisherKey, types.FromFil(10)), kit.MockProofs(), kit.ConstructorOpts(opts)) - ens.InterconnectAll().BeginMining(10 * time.Millisecond) - - _, err = client.WalletImport(ctx, &publisherKey.KeyInfo) - require.NoError(t, err) - - miner.SetControlAddresses(publisherKey.Address) - - dh := kit.NewDealHarness(t, client, miner, miner) - - // Starts a deal and waits until it's published - runDealTillPublish := func(rseed int) { - res, _ := client.CreateImportFile(ctx, rseed, 0) - - upds, err := client.ClientGetDealUpdates(ctx) - require.NoError(t, err) - - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.DealStartEpoch = startEpoch - dh.StartDeal(ctx, dp) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - for upd := range upds { - if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit { - done <- struct{}{} - } - } - }() - <-done - } - - // Run three deals in parallel - done := make(chan struct{}, maxDealsPerMsg+1) - for rseed := 1; rseed <= 3; rseed++ { - rseed := rseed - go func() { - runDealTillPublish(rseed) - done <- struct{}{} - }() - } - - // Wait for 
two of the deals to be published - for i := 0; i < int(maxDealsPerMsg); i++ { - <-done - } - - // Expect a single PublishStorageDeals message that includes the first two deals - //stm: @CHAIN_STATE_LIST_MESSAGES_001 - msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1) - require.NoError(t, err) - count := 0 - for _, msgCid := range msgCids { - msg, err := client.ChainGetMessage(ctx, msgCid) - require.NoError(t, err) - - if msg.Method == market.Methods.PublishStorageDeals { - count++ - var pubDealsParams market2.PublishStorageDealsParams - err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params)) - require.NoError(t, err) - require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg)) - require.Equal(t, publisherKey.Address.String(), msg.From.String()) - } - } - require.Equal(t, 1, count) - - // The third deal should be published once the publish period expires. - // Allow a little padding as it takes a moment for the state change to - // be noticed by the client. 
- padding := 10 * time.Second - select { - case <-time.After(publishPeriod + padding): - require.Fail(t, "Expected 3rd deal to be published once publish period elapsed") - case <-done: // Success - } -} diff --git a/itests/deals_remote_retrieval_test.go b/itests/deals_remote_retrieval_test.go deleted file mode 100644 index c0a37e69e33..00000000000 --- a/itests/deals_remote_retrieval_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package itests - -import ( - "bytes" - "context" - "fmt" - "io" - "net/url" - "os" - "path" - "testing" - "time" - - "github.com/google/uuid" - "github.com/gorilla/websocket" - "github.com/ipld/go-car" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/itests/kit" -) - -func TestNetStoreRetrieval(t *testing.T) { - kit.QuietMiningLogs() - - blocktime := 5 * time.Millisecond - ctx := context.Background() - - full, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) - ens.InterconnectAll().BeginMining(blocktime) - - time.Sleep(5 * time.Second) - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - dealStartEpoch := abi.ChainEpoch(2 << 12) - - rseed := 7 - - dh := kit.NewDealHarness(t, full, miner, miner) - dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{ - Rseed: rseed, - StartEpoch: dealStartEpoch, - UseCARFileForStorageDeal: true, - }) - - // create deal store - id := uuid.New() - rstore := bstore.NewMemorySync() - - au, err := url.Parse(full.ListenURL) - require.NoError(t, err) - - switch au.Scheme { - case "http": - au.Scheme = "ws" - case "https": - au.Scheme = "wss" - } - - au.Path = path.Join(au.Path, "/rest/v0/store/"+id.String()) 
- - conn, _, err := websocket.DefaultDialer.Dial(au.String(), nil) - require.NoError(t, err) - - _ = bstore.HandleNetBstoreWS(ctx, rstore, conn) - - dh.PerformRetrievalWithOrder(ctx, dealCid, res.Root, false, func(offer api.QueryOffer, address address.Address) api.RetrievalOrder { - order := offer.Order(address) - - order.RemoteStore = &id - - return order - }) - - // check blockstore blocks - carv1FilePath, _ := kit.CreateRandomCARv1(t, rseed, 200) - cb, err := os.ReadFile(carv1FilePath) - require.NoError(t, err) - - cr, err := car.NewCarReader(bytes.NewReader(cb)) - require.NoError(t, err) - - var blocks int - for { - cb, err := cr.Next() - if err == io.EOF { - fmt.Println("blocks: ", blocks) - return - } - require.NoError(t, err) - - sb, err := rstore.Get(ctx, cb.Cid()) - require.NoError(t, err) - require.EqualValues(t, cb.RawData(), sb.RawData()) - - blocks++ - } -} diff --git a/itests/deals_retry_deal_no_funds_test.go b/itests/deals_retry_deal_no_funds_test.go deleted file mode 100644 index 650b2436e0e..00000000000 --- a/itests/deals_retry_deal_no_funds_test.go +++ /dev/null @@ -1,188 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet/key" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -var ( - publishPeriod = 1 * time.Second - maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2 - - blockTime = 3 * time.Millisecond -) - -func TestDealsRetryLackOfFunds(t *testing.T) { - t.Run("cover-gas", func(t *testing.T) { - testDealsRetryLackOfFunds(t, 
types.NewInt(1020000000000)) - }) - t.Run("empty", func(t *testing.T) { - testDealsRetryLackOfFunds(t, types.NewInt(1)) - }) -} - -func testDealsRetryLackOfFunds(t *testing.T, publishStorageAccountFunds abi.TokenAmount) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 - ctx := context.Background() - - kit.QuietMiningLogs() - - // Allow 8MB sectors - eightMBSectorsOpt := kit.SectorSize(8 << 20) - - publishStorageDealKey, err := key.GenerateKey(types.KTSecp256k1) - require.NoError(t, err) - - opts := node.Options( - node.Override(new(*storageadapter.DealPublisher), - storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ - Period: publishPeriod, - MaxDealsPerMsg: maxDealsPerMsg, - }), - ), - node.Override(new(*ctladdr.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ - DealPublishControl: []string{ - publishStorageDealKey.Address.String(), - }, - DisableOwnerFallback: true, - DisableWorkerFallback: true, - })), - ) - - minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt) - - kit.QuietMiningLogs() - - ens. - Start(). - InterconnectAll(). 
- BeginMining(blockTime) - - _, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo) - require.NoError(t, err) - - miner.SetControlAddresses(publishStorageDealKey.Address) - - dh := kit.NewDealHarness(t, clientFullNode, miner, miner) - - res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file. - list, err := clientFullNode.ClientListImports(ctx) - require.NoError(t, err) - require.Len(t, list, 1) - require.Equal(t, res.Root, *list[0].Root) - - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.FastRetrieval = true - dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. - deal := dh.StartDeal(ctx, dp) - - propcid := *deal - - go func() { - time.Sleep(30 * time.Second) - - kit.SendFunds(ctx, t, minerFullNode, publishStorageDealKey.Address, types.FromFil(1)) - - err := miner.MarketRetryPublishDeal(ctx, propcid) - if err != nil { - panic(err) - } - }() - - dh.WaitDealSealed(ctx, deal, false, false, nil) -} - -func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 - ctx := context.Background() - kit.QuietMiningLogs() - - // Allow 8MB sectors - eightMBSectorsOpt := kit.SectorSize(8 << 20) - - publishStorageDealKey, err := key.GenerateKey(types.KTSecp256k1) - require.NoError(t, err) - - opts := node.Options( - node.Override(new(*storageadapter.DealPublisher), - storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ - Period: publishPeriod, - MaxDealsPerMsg: maxDealsPerMsg, - }), - ), - node.Override(new(*ctladdr.AddressSelector), 
modules.AddressSelector(&config.MinerAddressConfig{ - DealPublishControl: []string{ - publishStorageDealKey.Address.String(), - }, - DisableOwnerFallback: true, - DisableWorkerFallback: true, - })), - ) - - publishStorageAccountFunds := types.NewInt(1020000000000) - minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt) - - kit.QuietMiningLogs() - - ens. - Start(). - InterconnectAll(). - BeginMining(blockTime) - - _, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo) - require.NoError(t, err) - - miner.SetControlAddresses(publishStorageDealKey.Address) - - dh := kit.NewDealHarness(t, clientFullNode, miner, miner) - - res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file. - list, err := clientFullNode.ClientListImports(ctx) - require.NoError(t, err) - require.Len(t, list, 1) - require.Equal(t, res.Root, *list[0].Root) - - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.FastRetrieval = true - dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. 
- deal := dh.StartDeal(ctx, dp) - - dealSealed := make(chan struct{}) - go func() { - dh.WaitDealSealedQuiet(ctx, deal, false, false, nil) - dealSealed <- struct{}{} - }() - - select { - case <-dealSealed: - t.Fatal("deal shouldn't have sealed") - case <-time.After(time.Second * 15): - } -} diff --git a/itests/deals_test.go b/itests/deals_test.go deleted file mode 100644 index a6953d07e69..00000000000 --- a/itests/deals_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// stm: #integration -package itests - -import ( - "testing" - "time" - - "github.com/filecoin-project/lotus/itests/kit" -) - -func TestDealsWithSealingAndRPC(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - if testing.Short() { - t.Skip("skipping test in short mode") - } - - kit.QuietMiningLogs() - - client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs. 
- ens.InterconnectAll().BeginMiningMustPost(250 * time.Millisecond) - dh := kit.NewDealHarness(t, client, miner, miner) - - t.Run("stdretrieval", func(t *testing.T) { - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) - }) - - t.Run("fastretrieval", func(t *testing.T) { - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) - }) - - t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) { - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) - }) - - t.Run("stdretrieval-carv1", func(t *testing.T) { - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, UseCARFileForStorageDeal: true}) - }) - -} diff --git a/itests/eth_account_abstraction_test.go b/itests/eth_account_abstraction_test.go index 5ca672674a6..58e122e1603 100644 --- a/itests/eth_account_abstraction_test.go +++ b/itests/eth_account_abstraction_test.go @@ -74,7 +74,7 @@ func TestEthAccountAbstraction(t *testing.T) { msgFromPlaceholder, err = client.GasEstimateMessageGas(ctx, msgFromPlaceholder, nil, types.EmptyTSK) require.NoError(t, err) - txArgs, err := ethtypes.EthTxArgsFromUnsignedEthMessage(msgFromPlaceholder) + txArgs, err := ethtypes.Eth1559TxArgsFromUnsignedFilecoinMessage(msgFromPlaceholder) require.NoError(t, err) digest, err := txArgs.ToRlpUnsignedMsg() @@ -111,7 +111,7 @@ func TestEthAccountAbstraction(t *testing.T) { msgFromPlaceholder, err = client.GasEstimateMessageGas(ctx, msgFromPlaceholder, nil, types.EmptyTSK) require.NoError(t, err) - txArgs, err = ethtypes.EthTxArgsFromUnsignedEthMessage(msgFromPlaceholder) + txArgs, err = ethtypes.Eth1559TxArgsFromUnsignedFilecoinMessage(msgFromPlaceholder) require.NoError(t, err) digest, err = txArgs.ToRlpUnsignedMsg() @@ -185,7 +185,7 @@ func TestEthAccountAbstractionFailure(t *testing.T) { require.NoError(t, err) msgFromPlaceholder.Value = abi.TokenAmount(types.MustParseFIL("1000")) - txArgs, err := 
ethtypes.EthTxArgsFromUnsignedEthMessage(msgFromPlaceholder) + txArgs, err := ethtypes.Eth1559TxArgsFromUnsignedFilecoinMessage(msgFromPlaceholder) require.NoError(t, err) digest, err := txArgs.ToRlpUnsignedMsg() @@ -224,7 +224,7 @@ func TestEthAccountAbstractionFailure(t *testing.T) { msgFromPlaceholder, err = client.GasEstimateMessageGas(ctx, msgFromPlaceholder, nil, types.EmptyTSK) require.NoError(t, err) - txArgs, err = ethtypes.EthTxArgsFromUnsignedEthMessage(msgFromPlaceholder) + txArgs, err = ethtypes.Eth1559TxArgsFromUnsignedFilecoinMessage(msgFromPlaceholder) require.NoError(t, err) digest, err = txArgs.ToRlpUnsignedMsg() @@ -285,7 +285,7 @@ func TestEthAccountAbstractionFailsFromEvmActor(t *testing.T) { maxPriorityFeePerGas, err := client.EthMaxPriorityFeePerGas(ctx) require.NoError(t, err) - tx := ethtypes.EthTxArgs{ + tx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.Zero(), Nonce: 0, @@ -302,7 +302,7 @@ func TestEthAccountAbstractionFailsFromEvmActor(t *testing.T) { client.EVM().SubmitTransaction(ctx, &tx) - smsg, err := tx.ToSignedMessage() + smsg, err := ethtypes.ToSignedFilecoinMessage(&tx) require.NoError(t, err) ml, err := client.StateWaitMsg(ctx, smsg.Cid(), 1, api.LookbackNoLimit, true) diff --git a/itests/eth_conformance_test.go b/itests/eth_conformance_test.go index 9c1b2ae34ef..13d42e741cd 100644 --- a/itests/eth_conformance_test.go +++ b/itests/eth_conformance_test.go @@ -463,7 +463,7 @@ func createRawSignedEthTx(ctx context.Context, t *testing.T, client *kit.TestFul maxPriorityFeePerGas, err := client.EthMaxPriorityFeePerGas(ctx) require.NoError(t, err) - tx := ethtypes.EthTxArgs{ + tx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.NewInt(100), Nonce: 0, diff --git a/itests/eth_deploy_test.go b/itests/eth_deploy_test.go index 68861f98fd8..b3a4c26f7f7 100644 --- a/itests/eth_deploy_test.go +++ b/itests/eth_deploy_test.go @@ -73,7 +73,7 @@ func TestDeployment(t *testing.T) { require.NoError(t, err) // 
now deploy a contract from the placeholder, and validate it went well - tx := ethtypes.EthTxArgs{ + tx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.Zero(), Nonce: 0, diff --git a/itests/eth_hash_lookup_test.go b/itests/eth_hash_lookup_test.go index 5edd61d4e8f..1610e245826 100644 --- a/itests/eth_hash_lookup_test.go +++ b/itests/eth_hash_lookup_test.go @@ -61,7 +61,7 @@ func TestTransactionHashLookup(t *testing.T) { require.NoError(t, err) // now deploy a contract from the embryo, and validate it went well - tx := ethtypes.EthTxArgs{ + tx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.Zero(), Nonce: 0, @@ -367,7 +367,7 @@ func TestEthGetMessageCidByTransactionHashEthTx(t *testing.T) { require.NoError(t, err) // now deploy a contract from the embryo, and validate it went well - tx := ethtypes.EthTxArgs{ + tx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.Zero(), Nonce: 0, @@ -385,7 +385,7 @@ func TestEthGetMessageCidByTransactionHashEthTx(t *testing.T) { sender, err := tx.Sender() require.NoError(t, err) - unsignedMessage, err := tx.ToUnsignedMessage(sender) + unsignedMessage, err := tx.ToUnsignedFilecoinMessage(sender) require.NoError(t, err) rawTxHash, err := tx.TxHash() diff --git a/itests/eth_legacy_transactions_test.go b/itests/eth_legacy_transactions_test.go new file mode 100644 index 00000000000..791f6794d9b --- /dev/null +++ b/itests/eth_legacy_transactions_test.go @@ -0,0 +1,400 @@ +package itests + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + 
"github.com/filecoin-project/lotus/chain/types/ethtypes" + "github.com/filecoin-project/lotus/itests/kit" +) + +func TestLegacyValueTransferValidSignature(t *testing.T) { + blockTime := 100 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + + ens.InterconnectAll().BeginMining(blockTime) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + // create a new Ethereum account + key, ethAddr, deployer := client.EVM().NewAccount() + _, ethAddr2, _ := client.EVM().NewAccount() + + kit.SendFunds(ctx, t, client, deployer, types.FromFil(1000)) + + gasParams, err := json.Marshal(ethtypes.EthEstimateGasParams{Tx: ethtypes.EthCall{ + From: ðAddr, + To: ðAddr2, + Value: ethtypes.EthBigInt(big.NewInt(100)), + }}) + require.NoError(t, err) + + gaslimit, err := client.EthEstimateGas(ctx, gasParams) + require.NoError(t, err) + fmt.Println("gas limit is", gaslimit) + + tx := ethtypes.EthLegacyHomesteadTxArgs{ + Value: big.NewInt(100), + Nonce: 0, + To: ðAddr2, + GasPrice: types.NanoFil, + GasLimit: int(gaslimit), + V: big.Zero(), + R: big.Zero(), + S: big.Zero(), + } + + client.EVM().SignLegacyHomesteadTransaction(&tx, key.PrivateKey) + // Mangle signature + tx.V.Int.Xor(tx.V.Int, big.NewInt(1).Int) + + signed, err := tx.ToRlpSignedMsg() + require.NoError(t, err) + // Submit transaction with bad signature + _, err = client.EVM().EthSendRawTransaction(ctx, signed) + require.Error(t, err) + + // Submit transaction with valid signature + client.EVM().SignLegacyHomesteadTransaction(&tx, key.PrivateKey) + + hash := client.EVM().SubmitTransaction(ctx, &tx) + + receipt, err := client.EVM().WaitTransaction(ctx, hash) + require.NoError(t, err) + require.NotNil(t, receipt) + require.EqualValues(t, ethAddr, receipt.From) + require.EqualValues(t, ethAddr2, *receipt.To) + require.EqualValues(t, hash, receipt.TransactionHash) + + // Success. 
+ require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status) + + // Validate that we sent the expected transaction. + ethTx, err := client.EthGetTransactionByHash(ctx, &hash) + require.NoError(t, err) + require.Nil(t, ethTx.MaxPriorityFeePerGas) + require.Nil(t, ethTx.MaxFeePerGas) + + require.EqualValues(t, ethAddr, ethTx.From) + require.EqualValues(t, ethAddr2, *ethTx.To) + require.EqualValues(t, tx.Nonce, ethTx.Nonce) + require.EqualValues(t, hash, ethTx.Hash) + require.EqualValues(t, tx.Value, ethTx.Value) + require.EqualValues(t, 0, ethTx.Type) + require.EqualValues(t, 0, ethTx.ChainID) + require.EqualValues(t, ethtypes.EthBytes{}, ethTx.Input) + require.EqualValues(t, tx.GasLimit, ethTx.Gas) + require.EqualValues(t, tx.GasPrice, *ethTx.GasPrice) + require.EqualValues(t, tx.R, ethTx.R) + require.EqualValues(t, tx.S, ethTx.S) + require.EqualValues(t, tx.V, ethTx.V) +} + +func TestLegacyEIP155ValueTransferValidSignatureFailsNV22(t *testing.T) { + blockTime := 100 * time.Millisecond + + nv23Height := 10 + // We will move to NV23 at epoch 10 + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC(), kit.UpgradeSchedule(stmgr.Upgrade{ + Network: network.Version22, + Height: -1, + }, stmgr.Upgrade{ + Network: network.Version23, + Height: abi.ChainEpoch(nv23Height), + Migration: filcns.UpgradeActorsV13, + })) + + ens.InterconnectAll().BeginMining(blockTime) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + // create a new Ethereum account + key, ethAddr, deployer := client.EVM().NewAccount() + _, ethAddr2, _ := client.EVM().NewAccount() + + kit.SendFunds(ctx, t, client, deployer, types.FromFil(1000)) + + gasParams, err := json.Marshal(ethtypes.EthEstimateGasParams{Tx: ethtypes.EthCall{ + From: ðAddr, + To: ðAddr2, + Value: ethtypes.EthBigInt(big.NewInt(100)), + }}) + require.NoError(t, err) + + gaslimit, err := client.EthEstimateGas(ctx, gasParams) + require.NoError(t, err) + + legacyTx := 
ðtypes.EthLegacyHomesteadTxArgs{ + Value: big.NewInt(100), + Nonce: 0, + To: ðAddr2, + GasPrice: types.NanoFil, + GasLimit: int(gaslimit), + V: big.Zero(), + R: big.Zero(), + S: big.Zero(), + } + tx := ethtypes.NewEthLegacy155TxArgs(legacyTx) + + // TX will fail as we're still at NV22 + client.EVM().SignLegacyEIP155Transaction(tx, key.PrivateKey, big.NewInt(build.Eip155ChainId)) + + signed, err := tx.ToRawTxBytesSigned() + require.NoError(t, err) + + _, err = client.EVM().EthSendRawTransaction(ctx, signed) + require.Error(t, err) + require.Contains(t, err.Error(), "network version should be atleast NV23 for sending legacy ETH transactions") +} + +func TestLegacyEIP155ValueTransferValidSignature(t *testing.T) { + blockTime := 100 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + + ens.InterconnectAll().BeginMining(blockTime) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + // create a new Ethereum account + key, ethAddr, deployer := client.EVM().NewAccount() + _, ethAddr2, _ := client.EVM().NewAccount() + + kit.SendFunds(ctx, t, client, deployer, types.FromFil(1000)) + + gasParams, err := json.Marshal(ethtypes.EthEstimateGasParams{Tx: ethtypes.EthCall{ + From: ðAddr, + To: ðAddr2, + Value: ethtypes.EthBigInt(big.NewInt(100)), + }}) + require.NoError(t, err) + + gaslimit, err := client.EthEstimateGas(ctx, gasParams) + require.NoError(t, err) + + legacyTx := ðtypes.EthLegacyHomesteadTxArgs{ + Value: big.NewInt(100), + Nonce: 0, + To: ðAddr2, + GasPrice: types.NanoFil, + GasLimit: int(gaslimit), + V: big.Zero(), + R: big.Zero(), + S: big.Zero(), + } + tx := ethtypes.NewEthLegacy155TxArgs(legacyTx) + + client.EVM().SignLegacyEIP155Transaction(tx, key.PrivateKey, big.NewInt(build.Eip155ChainId)) + // Mangle signature + innerTx := tx.GetLegacyTx() + innerTx.V.Int.Xor(innerTx.V.Int, big.NewInt(1).Int) + + signed, err := tx.ToRawTxBytesSigned() + require.NoError(t, err) + // 
Submit transaction with bad signature + _, err = client.EVM().EthSendRawTransaction(ctx, signed) + require.Error(t, err) + + // Submit transaction with valid signature + client.EVM().SignLegacyEIP155Transaction(tx, key.PrivateKey, big.NewInt(build.Eip155ChainId)) + + signed, err = tx.ToRawTxBytesSigned() + require.NoError(t, err) + + hash, err := client.EVM().EthSendRawTransaction(ctx, signed) + require.NoError(t, err) + + receipt, err := client.EVM().WaitTransaction(ctx, hash) + require.NoError(t, err) + require.NotNil(t, receipt) + require.EqualValues(t, ethAddr, receipt.From) + require.EqualValues(t, ethAddr2, *receipt.To) + require.EqualValues(t, hash, receipt.TransactionHash) + + // Success. + require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status) + + // Validate that we sent the expected transaction. + ethTx, err := client.EthGetTransactionByHash(ctx, &hash) + require.NoError(t, err) + require.Nil(t, ethTx.MaxPriorityFeePerGas) + require.Nil(t, ethTx.MaxFeePerGas) + + innerTx = tx.GetLegacyTx() + require.EqualValues(t, ethAddr, ethTx.From) + require.EqualValues(t, ethAddr2, *ethTx.To) + require.EqualValues(t, innerTx.Nonce, ethTx.Nonce) + require.EqualValues(t, hash, ethTx.Hash) + require.EqualValues(t, innerTx.Value, ethTx.Value) + require.EqualValues(t, 0, ethTx.Type) + require.EqualValues(t, build.Eip155ChainId, ethTx.ChainID) + require.EqualValues(t, ethtypes.EthBytes{}, ethTx.Input) + require.EqualValues(t, innerTx.GasLimit, ethTx.Gas) + require.EqualValues(t, innerTx.GasPrice, *ethTx.GasPrice) + require.EqualValues(t, innerTx.R, ethTx.R) + require.EqualValues(t, innerTx.S, ethTx.S) + require.EqualValues(t, innerTx.V, ethTx.V) +} + +func TestLegacyContractInvocation(t *testing.T) { + blockTime := 100 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + + ens.InterconnectAll().BeginMining(blockTime) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + 
defer cancel() + + // create a new Ethereum account + key, ethAddr, deployer := client.EVM().NewAccount() + // send some funds to the f410 address + kit.SendFunds(ctx, t, client, deployer, types.FromFil(10)) + + // DEPLOY CONTRACT + tx, err := deployLegacyContractTx(ctx, t, client, ethAddr) + require.NoError(t, err) + + client.EVM().SignLegacyHomesteadTransaction(tx, key.PrivateKey) + // Mangle signature + tx.V.Int.Xor(tx.V.Int, big.NewInt(1).Int) + + signed, err := tx.ToRlpSignedMsg() + require.NoError(t, err) + // Submit transaction with bad signature + _, err = client.EVM().EthSendRawTransaction(ctx, signed) + require.Error(t, err) + + // Submit transaction with valid signature + client.EVM().SignLegacyHomesteadTransaction(tx, key.PrivateKey) + + hash := client.EVM().SubmitTransaction(ctx, tx) + + receipt, err := client.EVM().WaitTransaction(ctx, hash) + require.NoError(t, err) + require.NotNil(t, receipt) + require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status) + + // Get contract address. 
+ contractAddr := client.EVM().ComputeContractAddress(ethAddr, 0) + + // INVOKE CONTRACT + + // Params + // entry point for getBalance - f8b2cb4f + // address - ff00000000000000000000000000000000000064 + params, err := hex.DecodeString("f8b2cb4f000000000000000000000000ff00000000000000000000000000000000000064") + require.NoError(t, err) + + gasParams, err := json.Marshal(ethtypes.EthEstimateGasParams{Tx: ethtypes.EthCall{ + From: ðAddr, + To: &contractAddr, + Data: params, + }}) + require.NoError(t, err) + + gaslimit, err := client.EthEstimateGas(ctx, gasParams) + require.NoError(t, err) + + maxPriorityFeePerGas, err := client.EthMaxPriorityFeePerGas(ctx) + require.NoError(t, err) + + invokeTx := ethtypes.EthLegacyHomesteadTxArgs{ + To: &contractAddr, + Value: big.Zero(), + Nonce: 1, + GasPrice: big.Int(maxPriorityFeePerGas), + GasLimit: int(gaslimit), + Input: params, + V: big.Zero(), + R: big.Zero(), + S: big.Zero(), + } + + client.EVM().SignLegacyHomesteadTransaction(&invokeTx, key.PrivateKey) + // Mangle signature + invokeTx.V.Int.Xor(invokeTx.V.Int, big.NewInt(1).Int) + + signed, err = invokeTx.ToRlpSignedMsg() + require.NoError(t, err) + // Submit transaction with bad signature + _, err = client.EVM().EthSendRawTransaction(ctx, signed) + require.Error(t, err) + + // Submit transaction with valid signature + client.EVM().SignLegacyHomesteadTransaction(&invokeTx, key.PrivateKey) + hash = client.EVM().SubmitTransaction(ctx, &invokeTx) + + receipt, err = client.EVM().WaitTransaction(ctx, hash) + require.NoError(t, err) + require.NotNil(t, receipt) + + // Success. + require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status) + + // Validate that we correctly computed the gas outputs. 
+ mCid, err := client.EthGetMessageCidByTransactionHash(ctx, &hash) + require.NoError(t, err) + require.NotNil(t, mCid) + + invokResult, err := client.StateReplay(ctx, types.EmptyTSK, *mCid) + require.NoError(t, err) + require.EqualValues(t, invokResult.GasCost.GasUsed, big.NewInt(int64(receipt.GasUsed))) + effectiveGasPrice := big.Div(invokResult.GasCost.TotalCost, invokResult.GasCost.GasUsed) + require.EqualValues(t, effectiveGasPrice, big.Int(receipt.EffectiveGasPrice)) +} + +func deployLegacyContractTx(ctx context.Context, t *testing.T, client *kit.TestFullNode, ethAddr ethtypes.EthAddress) (*ethtypes.EthLegacyHomesteadTxArgs, error) { + // install contract + contractHex, err := os.ReadFile("./contracts/SimpleCoin.hex") + require.NoError(t, err) + + contract, err := hex.DecodeString(string(contractHex)) + require.NoError(t, err) + + gasParams, err := json.Marshal(ethtypes.EthEstimateGasParams{Tx: ethtypes.EthCall{ + From: ðAddr, + Data: contract, + }}) + if err != nil { + return nil, err + } + + gaslimit, err := client.EthEstimateGas(ctx, gasParams) + if err != nil { + return nil, err + } + + maxPriorityFeePerGas, err := client.EthMaxPriorityFeePerGas(ctx) + if err != nil { + return nil, err + } + + // now deploy a contract from the embryo, and validate it went well + return ðtypes.EthLegacyHomesteadTxArgs{ + Value: big.Zero(), + Nonce: 0, + GasPrice: big.Int(maxPriorityFeePerGas), + GasLimit: int(gaslimit), + Input: contract, + V: big.Zero(), + R: big.Zero(), + S: big.Zero(), + }, nil +} diff --git a/itests/eth_transactions_test.go b/itests/eth_transactions_test.go index 9e9fb7b87c4..3cb11d4a3a6 100644 --- a/itests/eth_transactions_test.go +++ b/itests/eth_transactions_test.go @@ -64,7 +64,7 @@ func TestValueTransferValidSignature(t *testing.T) { maxPriorityFeePerGas, err := client.EthMaxPriorityFeePerGas(ctx) require.NoError(t, err) - tx := ethtypes.EthTxArgs{ + tx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.NewInt(100), Nonce: 0, @@ 
-113,30 +113,13 @@ func TestValueTransferValidSignature(t *testing.T) { require.EqualValues(t, 2, ethTx.Type) require.EqualValues(t, ethtypes.EthBytes{}, ethTx.Input) require.EqualValues(t, tx.GasLimit, ethTx.Gas) - require.EqualValues(t, tx.MaxFeePerGas, ethTx.MaxFeePerGas) - require.EqualValues(t, tx.MaxPriorityFeePerGas, ethTx.MaxPriorityFeePerGas) + require.EqualValues(t, tx.MaxFeePerGas, *ethTx.MaxFeePerGas) + require.EqualValues(t, tx.MaxPriorityFeePerGas, *ethTx.MaxPriorityFeePerGas) require.EqualValues(t, tx.V, ethTx.V) require.EqualValues(t, tx.R, ethTx.R) require.EqualValues(t, tx.S, ethTx.S) } -func TestLegacyTransaction(t *testing.T) { - blockTime := 100 * time.Millisecond - client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) - - ens.InterconnectAll().BeginMining(blockTime) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - // This is a legacy style transaction obtained from etherscan - // Tx details: https://etherscan.io/getRawTx?tx=0x0763262208d89efeeb50c8bb05b50c537903fe9d7bdef3b223fd1f5f69f69b32 - txBytes, err := hex.DecodeString("f86f830131cf8504a817c800825208942cf1e5a8250ded8835694ebeb90cfa0237fcb9b1882ec4a5251d1100008026a0f5f8d2244d619e211eeb634acd1bea0762b7b4c97bba9f01287c82bfab73f911a015be7982898aa7cc6c6f27ff33e999e4119d6cd51330353474b98067ff56d930") - require.NoError(t, err) - _, err = client.EVM().EthSendRawTransaction(ctx, txBytes) - require.ErrorContains(t, err, "legacy transaction is not supported") -} - func TestContractDeploymentValidSignature(t *testing.T) { blockTime := 100 * time.Millisecond client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) @@ -255,7 +238,7 @@ func TestContractInvocation(t *testing.T) { maxPriorityFeePerGas, err := client.EthMaxPriorityFeePerGas(ctx) require.NoError(t, err) - invokeTx := ethtypes.EthTxArgs{ + invokeTx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, To: &contractAddr, Value: big.Zero(), @@ -363,7 
+346,7 @@ func TestGetBlockByNumber(t *testing.T) { require.Equal(t, types.FromFil(10).Int, bal.Int) } -func deployContractTx(ctx context.Context, client *kit.TestFullNode, ethAddr ethtypes.EthAddress, contract []byte) (*ethtypes.EthTxArgs, error) { +func deployContractTx(ctx context.Context, client *kit.TestFullNode, ethAddr ethtypes.EthAddress, contract []byte) (*ethtypes.Eth1559TxArgs, error) { gasParams, err := json.Marshal(ethtypes.EthEstimateGasParams{Tx: ethtypes.EthCall{ From: ðAddr, Data: contract, @@ -383,7 +366,7 @@ func deployContractTx(ctx context.Context, client *kit.TestFullNode, ethAddr eth } // now deploy a contract from the embryo, and validate it went well - return ðtypes.EthTxArgs{ + return ðtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.Zero(), Nonce: 0, @@ -607,3 +590,57 @@ func TestEthTxFromNativeAccount_InvalidReceiver(t *testing.T) { require.NoError(t, err) require.EqualValues(t, &expectedTo, tx.To) } + +func TestTraceTransaction(t *testing.T) { + blockTime := 100 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + + ens.InterconnectAll().BeginMining(blockTime) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + // install contract + contractHex, err := os.ReadFile("./contracts/SimpleCoin.hex") + require.NoError(t, err) + + contract, err := hex.DecodeString(string(contractHex)) + require.NoError(t, err) + + // create a new Ethereum account + key, ethAddr, deployer := client.EVM().NewAccount() + // send some funds to the f410 address + kit.SendFunds(ctx, t, client, deployer, types.FromFil(10)) + + // DEPLOY CONTRACT + tx, err := deployContractTx(ctx, client, ethAddr, contract) + require.NoError(t, err) + + client.EVM().SignTransaction(tx, key.PrivateKey) + hash := client.EVM().SubmitTransaction(ctx, tx) + + // EthTraceTransaction errors when tx hash is not found + nonExistentTxHash := 
"0x0000000000000000000000000000000000000000000000000000000000000000" + traces, err := client.EthTraceTransaction(ctx, nonExistentTxHash) + require.Error(t, err) + require.Contains(t, err.Error(), "transaction not found") + require.Nil(t, traces) + + // EthTraceTransaction errors when a trace for pending transactions is requested + traces, err = client.EthTraceTransaction(ctx, hash.String()) + require.Error(t, err) + require.Contains(t, err.Error(), "no trace for pending transactions") + require.Nil(t, traces) + + receipt, err := client.EVM().WaitTransaction(ctx, hash) + require.NoError(t, err) + require.NotNil(t, receipt) + require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status) + + // get trace and verify values + traces, err = client.EthTraceTransaction(ctx, hash.String()) + require.NoError(t, err) + require.NotNil(t, traces) + require.EqualValues(t, traces[0].TransactionHash, hash) + require.EqualValues(t, traces[0].BlockNumber, receipt.BlockNumber) +} diff --git a/itests/fevm_test.go b/itests/fevm_test.go index 2dcb8ef1dff..c2b5e2aa9bb 100644 --- a/itests/fevm_test.go +++ b/itests/fevm_test.go @@ -376,7 +376,7 @@ func TestFEVMTestApp(t *testing.T) { } -// TestFEVMTestApp creates a contract that just has a self destruct feature and calls it +// TestFEVMTestConstructor creates a contract that just has a self destruct feature and calls it func TestFEVMTestConstructor(t *testing.T) { ctx, cancel, client := kit.SetupFEVMTest(t) defer cancel() @@ -407,7 +407,7 @@ func TestFEVMAutoSelfDestruct(t *testing.T) { require.NoError(t, err) } -// TestFEVMTestApp creates a contract that just has a self destruct feature and calls it +// TestFEVMTestSendToContract creates a contract that just has a self destruct feature and calls it func TestFEVMTestSendToContract(t *testing.T) { ctx, cancel, client := kit.SetupFEVMTest(t) defer cancel() @@ -678,7 +678,7 @@ func TestFEVMRecursiveActorCallEstimate(t *testing.T) { nonce, err := client.MpoolGetNonce(ctx, ethFilAddr) 
require.NoError(t, err) - tx := ðtypes.EthTxArgs{ + tx := ðtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, To: &contractAddr, Value: big.Zero(), @@ -695,7 +695,7 @@ func TestFEVMRecursiveActorCallEstimate(t *testing.T) { client.EVM().SignTransaction(tx, key.PrivateKey) hash := client.EVM().SubmitTransaction(ctx, tx) - smsg, err := tx.ToSignedMessage() + smsg, err := ethtypes.ToSignedFilecoinMessage(tx) require.NoError(t, err) _, err = client.StateWaitMsg(ctx, smsg.Cid(), 0, 0, false) @@ -834,7 +834,7 @@ func TestFEVMBareTransferTriggersSmartContractLogic(t *testing.T) { maxPriorityFeePerGas, err := client.EthMaxPriorityFeePerGas(ctx) require.NoError(t, err) - tx := ethtypes.EthTxArgs{ + tx := ethtypes.Eth1559TxArgs{ ChainID: build.Eip155ChainId, Value: big.NewInt(100), Nonce: 0, diff --git a/itests/gateway_test.go b/itests/gateway_test.go index 2dc4e1034d5..b994d6de3c8 100644 --- a/itests/gateway_test.go +++ b/itests/gateway_test.go @@ -24,7 +24,6 @@ import ( "github.com/filecoin-project/lotus/api/client" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/clicommands" "github.com/filecoin-project/lotus/gateway" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/itests/multisig" @@ -194,46 +193,6 @@ func TestGatewayMsigCLI(t *testing.T) { multisig.RunMultisigTests(t, lite) } -func TestGatewayDealFlow(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - kit.QuietMiningLogs() - - blocktime := 5 * time.Millisecond - ctx := context.Background() - nodes := startNodesWithFunds(ctx, t, blocktime, 
maxLookbackCap, maxStateWaitLookbackLimit) - - time.Sleep(5 * time.Second) - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - dealStartEpoch := abi.ChainEpoch(2 << 12) - - dh := kit.NewDealHarness(t, nodes.lite, nodes.miner, nodes.miner) - dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{ - Rseed: 6, - StartEpoch: dealStartEpoch, - }) - dh.PerformRetrieval(ctx, dealCid, res.Root, false) -} - -func TestGatewayCLIDealFlow(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - kit.QuietMiningLogs() - - blocktime := 5 * time.Millisecond - ctx := context.Background() - nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - - kit.RunClientTest(t, clicommands.Commands, nodes.lite) -} - type testNodes struct { lite *kit.TestFullNode full *kit.TestFullNode diff --git a/itests/harmonytask_test.go b/itests/harmonytask_test.go deleted file mode 100644 index 94024e3e12e..00000000000 --- a/itests/harmonytask_test.go +++ /dev/null @@ -1,303 +0,0 @@ -package itests - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/node/impl" -) - -type task1 
struct { - toAdd []int - myPersonalTableLock sync.Mutex - myPersonalTable map[harmonytask.TaskID]int // This would typically be a DB table - WorkCompleted []string -} - -func withDbSetup(t *testing.T, f func(*kit.TestMiner)) { - _, miner, _ := kit.EnsembleMinimal(t, - kit.LatestActorsAt(-1), - kit.MockProofs(), - kit.WithSectorIndexDB(), - ) - _ = logging.SetLogLevel("harmonytask", "debug") - - f(miner) -} - -func (t *task1) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - if !stillOwned() { - return false, errors.New("Why not still owned?") - } - t.myPersonalTableLock.Lock() - defer t.myPersonalTableLock.Unlock() - t.WorkCompleted = append(t.WorkCompleted, fmt.Sprintf("taskResult%d", t.myPersonalTable[taskID])) - return true, nil -} -func (t *task1) CanAccept(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - return &list[0], nil -} -func (t *task1) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 100, - Name: "ThingOne", - MaxFailures: 1, - Cost: resources.Resources{ - Cpu: 1, - Ram: 100 << 10, // at 100kb, it's tiny - }, - } -} -func (t *task1) Adder(add harmonytask.AddTaskFunc) { - for _, vTmp := range t.toAdd { - v := vTmp - add(func(tID harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) { - t.myPersonalTableLock.Lock() - defer t.myPersonalTableLock.Unlock() - - t.myPersonalTable[tID] = v - return true, nil - }) - } -} - -func init() { - //logging.SetLogLevel("harmonydb", "debug") - //logging.SetLogLevel("harmonytask", "debug") -} - -func TestHarmonyTasks(t *testing.T) { - //t.Parallel() - withDbSetup(t, func(m *kit.TestMiner) { - cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB - t1 := &task1{ - toAdd: []int{56, 73}, - myPersonalTable: map[harmonytask.TaskID]int{}, - } - harmonytask.POLL_DURATION = time.Millisecond * 100 - e, err := harmonytask.New(cdb, []harmonytask.TaskInterface{t1}, "test:1") - require.NoError(t, err) - time.Sleep(time.Second) // 
do the work. FLAKYNESS RISK HERE. - e.GracefullyTerminate() - expected := []string{"taskResult56", "taskResult73"} - sort.Strings(t1.WorkCompleted) - require.Equal(t, expected, t1.WorkCompleted, "unexpected results") - }) -} - -type passthru struct { - dtl harmonytask.TaskTypeDetails - do func(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) - canAccept func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) - adder func(add harmonytask.AddTaskFunc) -} - -func (t *passthru) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - return t.do(taskID, stillOwned) -} -func (t *passthru) CanAccept(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - return t.canAccept(list, e) -} -func (t *passthru) TypeDetails() harmonytask.TaskTypeDetails { - return t.dtl -} -func (t *passthru) Adder(add harmonytask.AddTaskFunc) { - if t.adder != nil { - t.adder(add) - } -} - -// Common stuff -var dtl = harmonytask.TaskTypeDetails{Name: "foo", Max: -1, Cost: resources.Resources{}} -var lettersMutex sync.Mutex - -func fooLetterAdder(t *testing.T, cdb *harmonydb.DB) *passthru { - return &passthru{ - dtl: dtl, - canAccept: func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - return nil, nil - }, - adder: func(add harmonytask.AddTaskFunc) { - for _, vTmp := range []string{"A", "B"} { - v := vTmp - add(func(tID harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) { - _, err := tx.Exec("INSERT INTO itest_scratch (some_int, content) VALUES ($1,$2)", tID, v) - require.NoError(t, err) - return true, nil - }) - } - }, - } -} -func fooLetterSaver(t *testing.T, cdb *harmonydb.DB, dest *[]string) *passthru { - return &passthru{ - dtl: dtl, - canAccept: func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - return &list[0], nil - }, - do: func(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) 
{ - var content string - err = cdb.QueryRow(context.Background(), - "SELECT content FROM itest_scratch WHERE some_int=$1", tID).Scan(&content) - require.NoError(t, err) - lettersMutex.Lock() - defer lettersMutex.Unlock() - *dest = append(*dest, content) - return true, nil - }, - } -} - -func TestHarmonyTasksWith2PartiesPolling(t *testing.T) { - //t.Parallel() - withDbSetup(t, func(m *kit.TestMiner) { - cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB - senderParty := fooLetterAdder(t, cdb) - var dest []string - workerParty := fooLetterSaver(t, cdb, &dest) - harmonytask.POLL_DURATION = time.Millisecond * 100 - sender, err := harmonytask.New(cdb, []harmonytask.TaskInterface{senderParty}, "test:1") - require.NoError(t, err) - worker, err := harmonytask.New(cdb, []harmonytask.TaskInterface{workerParty}, "test:2") - require.NoError(t, err) - time.Sleep(time.Second) // do the work. FLAKYNESS RISK HERE. - sender.GracefullyTerminate() - worker.GracefullyTerminate() - sort.Strings(dest) - require.Equal(t, []string{"A", "B"}, dest) - }) -} - -func TestWorkStealing(t *testing.T) { - //t.Parallel() - withDbSetup(t, func(m *kit.TestMiner) { - cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB - ctx := context.Background() - - // The dead worker will be played by a few SQL INSERTS. 
- _, err := cdb.Exec(ctx, `INSERT INTO harmony_machines - (id, last_contact,host_and_port, cpu, ram, gpu) - VALUES (300, DATE '2000-01-01', 'test:1', 4, 400000, 1)`) - require.ErrorIs(t, err, nil) - _, err = cdb.Exec(ctx, `INSERT INTO harmony_task - (id, name, owner_id, posted_time, added_by) - VALUES (1234, 'foo', 300, DATE '2000-01-01', 300)`) - require.ErrorIs(t, err, nil) - _, err = cdb.Exec(ctx, "INSERT INTO itest_scratch (some_int, content) VALUES (1234, 'M')") - require.ErrorIs(t, err, nil) - - harmonytask.POLL_DURATION = time.Millisecond * 100 - harmonytask.CLEANUP_FREQUENCY = time.Millisecond * 100 - var dest []string - worker, err := harmonytask.New(cdb, []harmonytask.TaskInterface{fooLetterSaver(t, cdb, &dest)}, "test:2") - require.ErrorIs(t, err, nil) - time.Sleep(time.Second) // do the work. FLAKYNESS RISK HERE. - worker.GracefullyTerminate() - require.Equal(t, []string{"M"}, dest) - }) -} - -func TestTaskRetry(t *testing.T) { - //t.Parallel() - withDbSetup(t, func(m *kit.TestMiner) { - cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB - senderParty := fooLetterAdder(t, cdb) - harmonytask.POLL_DURATION = time.Millisecond * 100 - sender, err := harmonytask.New(cdb, []harmonytask.TaskInterface{senderParty}, "test:1") - require.NoError(t, err) - - alreadyFailed := map[string]bool{} - var dest []string - fails2xPerMsg := &passthru{ - dtl: dtl, - canAccept: func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - return &list[0], nil - }, - do: func(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - var content string - err = cdb.QueryRow(context.Background(), - "SELECT content FROM itest_scratch WHERE some_int=$1", tID).Scan(&content) - require.NoError(t, err) - lettersMutex.Lock() - defer lettersMutex.Unlock() - if !alreadyFailed[content] { - alreadyFailed[content] = true - return false, errors.New("intentional 'error'") - } - dest = append(dest, content) - return true, nil - }, - } - rcv, err := 
harmonytask.New(cdb, []harmonytask.TaskInterface{fails2xPerMsg}, "test:2") - require.NoError(t, err) - time.Sleep(time.Second) - sender.GracefullyTerminate() - rcv.GracefullyTerminate() - sort.Strings(dest) - require.Equal(t, []string{"A", "B"}, dest) - type hist struct { - TaskID int - Result bool - Err string - } - var res []hist - require.NoError(t, cdb.Select(context.Background(), &res, - `SELECT task_id, result, err FROM harmony_task_history - ORDER BY result DESC, task_id`)) - - require.Equal(t, []hist{ - {1, true, ""}, - {2, true, ""}, - {1, false, "error: intentional 'error'"}, - {2, false, "error: intentional 'error'"}}, res) - }) -} - -func TestBoredom(t *testing.T) { - //t.Parallel() - withDbSetup(t, func(m *kit.TestMiner) { - cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB - harmonytask.POLL_DURATION = time.Millisecond * 100 - var taskID harmonytask.TaskID - var ran bool - boredParty := &passthru{ - dtl: harmonytask.TaskTypeDetails{ - Name: "boredTest", - Max: -1, - Cost: resources.Resources{}, - IAmBored: func(add harmonytask.AddTaskFunc) error { - add(func(tID harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) { - taskID = tID - return true, nil - }) - return nil - }, - }, - canAccept: func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - require.Equal(t, harmonytask.WorkSourceIAmBored, e.WorkOrigin) - return &list[0], nil - }, - do: func(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - require.Equal(t, taskID, tID) - ran = true - return true, nil - }, - } - ht, err := harmonytask.New(cdb, []harmonytask.TaskInterface{boredParty}, "test:1") - require.NoError(t, err) - require.Eventually(t, func() bool { return ran }, time.Second, time.Millisecond*100) - ht.GracefullyTerminate() - }) -} diff --git a/itests/kit/blockminer.go b/itests/kit/blockminer.go index 40d23a6cdf0..75842567819 100644 --- a/itests/kit/blockminer.go +++ b/itests/kit/blockminer.go @@ -12,6 +12,7 @@ import ( 
"github.com/stretchr/testify/require" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" @@ -20,6 +21,7 @@ import ( "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/miner" ) @@ -29,11 +31,13 @@ type BlockMiner struct { t *testing.T miner *TestMiner - nextNulls int64 - pause chan struct{} - unpause chan struct{} - wg sync.WaitGroup - cancel context.CancelFunc + nextNulls int64 + postWatchMiners []address.Address + postWatchMinersLk sync.Mutex + pause chan struct{} + unpause chan struct{} + wg sync.WaitGroup + cancel context.CancelFunc } func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { @@ -46,19 +50,58 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { } } +type minerDeadline struct { + addr address.Address + deadline dline.Info +} + +type minerDeadlines []minerDeadline + +func (mds minerDeadlines) CloseList() []abi.ChainEpoch { + var ret []abi.ChainEpoch + for _, md := range mds { + ret = append(ret, md.deadline.Last()) + } + return ret +} + +func (mds minerDeadlines) MinerStringList() []string { + var ret []string + for _, md := range mds { + ret = append(ret, md.addr.String()) + } + return ret +} + +// FilterByLast returns a new minerDeadlines with only the deadlines that have a Last() epoch +// greater than or equal to last. 
+func (mds minerDeadlines) FilterByLast(last abi.ChainEpoch) minerDeadlines { + var ret minerDeadlines + for _, md := range mds { + if last >= md.deadline.Last() { + ret = append(ret, md) + } + } + return ret +} + type partitionTracker struct { + minerAddr address.Address partitions []api.Partition posted bitfield.BitField } -func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker { - dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK) - require.NoError(bm.t, err) +// newPartitionTracker creates a new partitionTracker that tracks the deadline index dlIdx for the +// given minerAddr. It uses the BlockMiner bm to interact with the chain. +func newPartitionTracker(ctx context.Context, t *testing.T, client v1api.FullNode, minerAddr address.Address, dlIdx uint64) *partitionTracker { + dlines, err := client.StateMinerDeadlines(ctx, minerAddr, types.EmptyTSK) + require.NoError(t, err) dl := dlines[dlIdx] - parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK) - require.NoError(bm.t, err) + parts, err := client.StateMinerPartitions(ctx, minerAddr, dlIdx, types.EmptyTSK) + require.NoError(t, err) return &partitionTracker{ + minerAddr: minerAddr, partitions: parts, posted: dl.PostSubmissions, } @@ -74,11 +117,11 @@ func (p *partitionTracker) done(t *testing.T) bool { return uint64(len(p.partitions)) == p.count(t) } -func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) { +func (p *partitionTracker) recordIfPost(t *testing.T, msg *types.Message) (ret bool) { defer func() { ret = p.done(t) }() - if !(msg.To == bm.miner.ActorAddr) { + if !(msg.To == p.minerAddr) { return } if msg.Method != builtin.MethodsMiner.SubmitWindowedPoSt { @@ -92,19 +135,18 @@ func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types return } -func (bm *BlockMiner) forcePoSt(ctx context.Context, ts 
*types.TipSet, dlinfo *dline.Info) { - - tracker := newPartitionTracker(ctx, dlinfo.Index, bm) +func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, minerAddr address.Address, dlinfo dline.Info) { + tracker := newPartitionTracker(ctx, bm.t, bm.miner.FullNode, minerAddr, dlinfo.Index) if !tracker.done(bm.t) { // need to wait for post bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t)) - poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) //subscribe before checking pending so we don't miss any events + poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) // subscribe before checking pending so we don't miss any events require.NoError(bm.t, err) // First check pending messages we'll mine this epoch msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK) require.NoError(bm.t, err) for _, msg := range msgs { - if tracker.recordIfPost(bm.t, bm, &msg.Message) { + if tracker.recordIfPost(bm.t, &msg.Message) { fmt.Printf("found post in mempool pending\n") } } @@ -114,13 +156,13 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc) require.NoError(bm.t, err) for _, msg := range msgs.BlsMessages { - if tracker.recordIfPost(bm.t, bm, msg) { + if tracker.recordIfPost(bm.t, msg) { fmt.Printf("found post in message of prev tipset\n") } } for _, msg := range msgs.SecpkMessages { - if tracker.recordIfPost(bm.t, bm, &msg.Message) { + if tracker.recordIfPost(bm.t, &msg.Message) { fmt.Printf("found post in message of prev tipset\n") } } @@ -139,7 +181,7 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d bm.t.Logf("pool event: %d", evt.Type) if evt.Type == api.MpoolAdd { bm.t.Logf("incoming message %v", evt.Message) - if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) { + if tracker.recordIfPost(bm.t, &evt.Message.Message) { fmt.Printf("found post in mempool evt\n") break POOL } @@ 
-151,11 +193,24 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d } } +// WatchMinerForPost adds a miner to the list of miners that the BlockMiner will watch for window +// post submissions when using MineBlocksMustPost. This is useful when we have more than just the +// BlockMiner submitting posts, particularly in the case of UnmanagedMiners which don't participate +// in block mining. +func (bm *BlockMiner) WatchMinerForPost(minerAddr address.Address) { + bm.postWatchMinersLk.Lock() + bm.postWatchMiners = append(bm.postWatchMiners, minerAddr) + bm.postWatchMinersLk.Unlock() +} + // Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool // and everything shuts down if a post fails. It also enforces that every block mined succeeds func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) { time.Sleep(time.Second) + // watch for our own window posts + bm.WatchMinerForPost(bm.miner.ActorAddr) + // wrap context in a cancellable context. ctx, bm.cancel = context.WithCancel(ctx) bm.wg.Add(1) @@ -182,11 +237,25 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur ts, err := bm.miner.FullNode.ChainHead(ctx) require.NoError(bm.t, err) - dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, ts.Key()) - require.NoError(bm.t, err) - if ts.Height()+5+abi.ChainEpoch(nulls) >= dlinfo.Last() { // Next block brings us past the last epoch in dline, we need to wait for miner to post - bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last()) - bm.forcePoSt(ctx, ts, dlinfo) + // Get current deadline information for all miners, then filter by the ones that are about to + // close so we can force a post for them. 
+ bm.postWatchMinersLk.Lock() + var impendingDeadlines minerDeadlines + for _, minerAddr := range bm.postWatchMiners { + dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, minerAddr, ts.Key()) + require.NoError(bm.t, err) + require.NotNil(bm.t, dlinfo, "no deadline info for miner %s", minerAddr) + impendingDeadlines = append(impendingDeadlines, minerDeadline{addr: minerAddr, deadline: *dlinfo}) + } + bm.postWatchMinersLk.Unlock() + impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls)) + + if len(impendingDeadlines) > 0 { + // Next block brings us too close for at least one deadline, we need to wait for miners to post + bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList()) + for _, md := range impendingDeadlines { + bm.forcePoSt(ctx, ts, md.addr, md.deadline) + } } var target abi.ChainEpoch @@ -216,10 +285,13 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur return } if !success { - // if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to post - if ts.Height()+5+abi.ChainEpoch(nulls+i) >= dlinfo.Last() { - bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last()) - bm.forcePoSt(ctx, ts, dlinfo) + // if we are mining a new null block and it brings us past deadline boundary we need to wait for miners to post + impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls+i)) + if len(impendingDeadlines) > 0 { + bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList()) + for _, md := range impendingDeadlines { + bm.forcePoSt(ctx, ts, md.addr, md.deadline) + } } } } @@ -378,4 +450,7 @@ func (bm *BlockMiner) Stop() { close(bm.pause) bm.pause = nil } + bm.postWatchMinersLk.Lock() + bm.postWatchMiners = 
nil + bm.postWatchMinersLk.Unlock() } diff --git a/itests/kit/client.go b/itests/kit/client.go deleted file mode 100644 index 18e4259e4e8..00000000000 --- a/itests/kit/client.go +++ /dev/null @@ -1,161 +0,0 @@ -package kit - -import ( - "context" - "fmt" - "math/rand" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - lcli "github.com/urfave/cli/v2" - - "github.com/filecoin-project/specs-actors/v2/actors/builtin" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" -) - -// RunClientTest exercises some of the Client CLI commands -func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - // Create mock CLI - mockCLI := NewMockCLI(ctx, t, cmds, api.NodeFull) - clientCLI := mockCLI.Client(clientNode.ListenAddr) - - // Get the Miner address - addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK) - require.NoError(t, err) - require.Len(t, addrs, 1) - - minerAddr := addrs[0] - fmt.Println("Miner:", minerAddr) - - // client query-ask - out := clientCLI.RunCmd("client", "query-ask", minerAddr.String()) - require.Regexp(t, regexp.MustCompile("Ask:"), out) - - // Create a deal (non-interactive) - // client deal --start-epoch= 1000000attofil - res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0) - - require.NoError(t, err) - startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12) - dataCid := res.Root - price := "1000000attofil" - duration := fmt.Sprintf("%d", build.MinDealDuration) - out = clientCLI.RunCmd("client", "deal", startEpoch, dataCid.String(), minerAddr.String(), price, duration) - fmt.Println("client deal", out) - - // Create a deal (interactive) - // client deal - // - // (in days) - // - // "no" (verified Client) - // "yes" (confirm deal) - res, _, _, err = CreateImportFile(ctx, clientNode, 
2, 0) - require.NoError(t, err) - dataCid2 := res.Root - duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay) - cmd := []string{"client", "deal"} - interactiveCmds := []string{ - dataCid2.String(), - duration, - minerAddr.String(), - "no", - "yes", - } - out = clientCLI.RunInteractiveCmd(cmd, interactiveCmds) - fmt.Println("client deal:\n", out) - - // Wait for provider to start sealing deal - dealStatus := "" - for { - // client list-deals - out = clientCLI.RunCmd("client", "list-deals", "--show-failed") - fmt.Println("list-deals:\n", out) - - lines := strings.Split(out, "\n") - require.GreaterOrEqual(t, len(lines), 2) - re := regexp.MustCompile(`\s+`) - parts := re.Split(lines[1], -1) - if len(parts) < 4 { - require.Fail(t, "bad list-deals output format") - } - dealStatus = parts[3] - fmt.Println(" Deal status:", dealStatus) - - st := CategorizeDealState(dealStatus) - require.NotEqual(t, TestDealStateFailed, st) - if st == TestDealStateComplete { - break - } - - time.Sleep(time.Second) - } - - // client retrieval-ask --size=1 - out = clientCLI.RunCmd("client", "retrieval-ask", "--size=1", minerAddr.String(), dataCid.String()) - require.Regexp(t, regexp.MustCompile("Ask:"), out) - fmt.Println("retrieval ask:\n", out) - - // Retrieve the first file from the Miner - // client retrieve - tmpdir, err := os.MkdirTemp(os.TempDir(), "test-cli-client") - require.NoError(t, err) - path := filepath.Join(tmpdir, "outfile.dat") - - // Wait for client retrieve to succeed. 
- for { - out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path) - fmt.Println("retrieve:\n", out) - if strings.Contains(out, "Success") { - break - } - } -} - -func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) { - data, path, err = createRandomFile(rseed, size) - if err != nil { - return nil, "", nil, err - } - - res, err = client.ClientImport(ctx, api.FileRef{Path: path}) - if err != nil { - return nil, "", nil, err - } - return res, path, data, nil -} - -func createRandomFile(rseed, size int) ([]byte, string, error) { - if size == 0 { - size = 1600 - } - data := make([]byte, size) - _, err := rand.New(rand.NewSource(int64(rseed))).Read(data) - if err != nil { - return nil, "", err - } - - dir, err := os.MkdirTemp(os.TempDir(), "test-make-deal-") - if err != nil { - return nil, "", err - } - - path := filepath.Join(dir, "sourcefile.dat") - err = os.WriteFile(path, data, 0644) - if err != nil { - return nil, "", err - } - - return data, path, nil -} diff --git a/itests/kit/deals.go b/itests/kit/deals.go deleted file mode 100644 index eb6b58667dc..00000000000 --- a/itests/kit/deals.go +++ /dev/null @@ -1,483 +0,0 @@ -package kit - -import ( - "context" - "errors" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/ipfs/boxo/files" - dag "github.com/ipfs/boxo/ipld/merkledag" - dstest "github.com/ipfs/boxo/ipld/merkledag/test" - unixfile "github.com/ipfs/boxo/ipld/unixfs/file" - "github.com/ipfs/go-cid" - ipldcbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ipld/go-car" - _ "github.com/ipld/go-ipld-prime/codec/dagcbor" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - 
"github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - sealing "github.com/filecoin-project/lotus/storage/pipeline" -) - -type DealHarness struct { - t *testing.T - client *TestFullNode - main *TestMiner - market *TestMiner -} - -type MakeFullDealParams struct { - Rseed int - FastRet bool - StartEpoch abi.ChainEpoch - UseCARFileForStorageDeal bool - - // SuspendUntilCryptoeconStable suspends deal-making, until cryptoecon - // parameters are stabilised. This affects projected collateral, and tests - // will fail in network version 13 and higher if deals are started too soon - // after network birth. - // - // The reason is that the formula for collateral calculation takes - // circulating supply into account: - // - // [portion of power this deal will be] * [~1% of tokens]. - // - // In the first epochs after genesis, the total circulating supply is - // changing dramatically in percentual terms. Therefore, if the deal is - // proposed too soon, by the time it gets published on chain, the quoted - // provider collateral will no longer be valid. - // - // The observation is that deals fail with: - // - // GasEstimateMessageGas error: estimating gas used: message execution - // failed: exit 16, reason: Provider collateral out of bounds. (RetCode=16) - // - // Enabling this will suspend deal-making until the network has reached a - // height of 300. - SuspendUntilCryptoeconStable bool -} - -// NewDealHarness creates a test harness that contains testing utilities for deals. 
-func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market *TestMiner) *DealHarness { - return &DealHarness{ - t: t, - client: client, - main: main, - market: market, - } -} - -// MakeOnlineDeal makes an online deal, generating a random file with the -// supplied seed, and setting the specified fast retrieval flag and start epoch -// on the storage deal. It returns when the deal is sealed. -// -// TODO: convert input parameters to struct, and add size as an input param. -func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) { - deal, res, path = dh.StartRandomDeal(ctx, params) - - fmt.Printf("WAIT DEAL SEALEDS START\n") - dh.WaitDealSealed(ctx, deal, false, false, nil) - fmt.Printf("WAIT DEAL SEALEDS END\n") - return deal, res, path -} - -func (dh *DealHarness) StartRandomDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) { - if params.UseCARFileForStorageDeal { - res, _, path = dh.client.ClientImportCARFile(ctx, params.Rseed, 200) - } else { - res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0) - } - - dh.t.Logf("FILE CID: %s", res.Root) - - if params.SuspendUntilCryptoeconStable { - dh.t.Logf("deal-making suspending until cryptecon parameters have stabilised") - ts := dh.client.WaitTillChain(ctx, HeightAtLeast(300)) - dh.t.Logf("deal-making continuing; current height is %d", ts.Height()) - } - - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - dp.DealStartEpoch = params.StartEpoch - dp.FastRetrieval = params.FastRet - deal = dh.StartDeal(ctx, dp) - - return deal, res, path -} - -func (dh *DealHarness) DefaultStartDealParams() api.StartDealParams { - dp := api.StartDealParams{ - Data: &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync}, - EpochPrice: types.NewInt(1000000), - MinBlocksDuration: uint64(build.MinDealDuration), - } - - var err error - dp.Miner, err = 
dh.main.ActorAddress(context.Background()) - require.NoError(dh.t, err) - - dp.Wallet, err = dh.client.WalletDefaultAddress(context.Background()) - require.NoError(dh.t, err) - - return dp -} - -// StartDeal starts a storage deal between the client and the miner. -func (dh *DealHarness) StartDeal(ctx context.Context, dealParams api.StartDealParams) *cid.Cid { - dealProposalCid, err := dh.client.ClientStartDeal(ctx, &dealParams) - require.NoError(dh.t, err) - return dealProposalCid -} - -// WaitDealSealed waits until the deal is sealed. -func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) { -loop: - for { - di, err := dh.client.ClientGetDealInfo(ctx, *deal) - require.NoError(dh.t, err) - - switch di.State { - case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing: - if noseal { - return - } - if !noSealStart { - dh.StartSealingWaiting(ctx) - } - case storagemarket.StorageDealProposalRejected: - dh.t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - dh.t.Fatal("deal failed") - case storagemarket.StorageDealError: - dh.t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealActive: - dh.t.Log("COMPLETE", di) - break loop - } - - mds, err := dh.market.MarketListIncompleteDeals(ctx) - require.NoError(dh.t, err) - - var minerState storagemarket.StorageDealStatus - for _, md := range mds { - if md.DealID == di.DealID { - minerState = md.State - break - } - } - - dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState]) - time.Sleep(time.Second / 2) - if cb != nil { - cb() - } - } - fmt.Printf("WAIT DEAL SEALED LOOP BROKEN\n") -} - -// WaitDealSealedQuiet waits until the deal is sealed, without logging anything. 
-func (dh *DealHarness) WaitDealSealedQuiet(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) { -loop: - for { - di, err := dh.client.ClientGetDealInfo(ctx, *deal) - require.NoError(dh.t, err) - - switch di.State { - case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing: - if noseal { - return - } - if !noSealStart { - dh.StartSealingWaiting(ctx) - } - case storagemarket.StorageDealProposalRejected: - dh.t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - dh.t.Fatal("deal failed") - case storagemarket.StorageDealError: - dh.t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealActive: - break loop - } - - _, err = dh.market.MarketListIncompleteDeals(ctx) - require.NoError(dh.t, err) - - time.Sleep(time.Second / 2) - if cb != nil { - cb() - } - } -} - -func (dh *DealHarness) ExpectDealFailure(ctx context.Context, deal *cid.Cid, errs string) error { - for { - di, err := dh.client.ClientGetDealInfo(ctx, *deal) - require.NoError(dh.t, err) - - switch di.State { - case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing: - return fmt.Errorf("deal is sealing, and we expected an error: %s", errs) - case storagemarket.StorageDealProposalRejected: - if strings.Contains(di.Message, errs) { - return nil - } - return fmt.Errorf("unexpected error: %s ; expected: %s", di.Message, errs) - case storagemarket.StorageDealFailing: - if strings.Contains(di.Message, errs) { - return nil - } - return fmt.Errorf("unexpected error: %s ; expected: %s", di.Message, errs) - case storagemarket.StorageDealError: - if strings.Contains(di.Message, errs) { - return nil - } - return fmt.Errorf("unexpected error: %s ; expected: %s", di.Message, errs) - case storagemarket.StorageDealActive: - return errors.New("expected to get an error, but didn't get one") - } - - mds, err := dh.market.MarketListIncompleteDeals(ctx) - require.NoError(dh.t, err) - - var minerState storagemarket.StorageDealStatus 
- for _, md := range mds { - if md.DealID == di.DealID { - minerState = md.State - break - } - } - - dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState]) - time.Sleep(time.Second / 2) - } -} - -// WaitDealPublished waits until the deal is published. -func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) { - subCtx, cancel := context.WithCancel(ctx) - defer cancel() - - updates, err := dh.market.MarketGetDealUpdates(subCtx) - require.NoError(dh.t, err) - - for { - select { - case <-ctx.Done(): - dh.t.Fatal("context timeout") - case di := <-updates: - if deal.Equals(di.ProposalCid) { - switch di.State { - case storagemarket.StorageDealProposalRejected: - dh.t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - dh.t.Fatal("deal failed") - case storagemarket.StorageDealError: - dh.t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive: - dh.t.Log("COMPLETE", di) - return - } - dh.t.Log("Deal state: ", storagemarket.DealStates[di.State]) - } - } - } -} - -func (dh *DealHarness) StartSealingWaiting(ctx context.Context) { - snums, err := dh.main.SectorsListNonGenesis(ctx) - require.NoError(dh.t, err) - for _, snum := range snums { - si, err := dh.main.SectorsStatus(ctx, snum, false) - require.NoError(dh.t, err) - - dh.t.Logf("Sector state <%d>-[%d]:, %s", snum, si.SealProof, si.State) - if si.State == api.SectorState(sealing.WaitDeals) { - require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum)) - } - - dh.main.FlushSealingBatches(ctx) - } -} - -func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool, offers ...api.QueryOffer) (path string) { - return dh.PerformRetrievalWithOrder(ctx, deal, root, carExport, func(offer api.QueryOffer, a address.Address) 
api.RetrievalOrder { - return offer.Order(a) - }, offers...) -} - -func (dh *DealHarness) PerformRetrievalWithOrder(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool, makeOrder func(api.QueryOffer, address.Address) api.RetrievalOrder, offers ...api.QueryOffer) (path string) { - var offer api.QueryOffer - if len(offers) == 0 { - // perform retrieval. - info, err := dh.client.ClientGetDealInfo(ctx, *deal) - require.NoError(dh.t, err) - - offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID) - require.NoError(dh.t, err) - require.NotEmpty(dh.t, offers, "no offers") - offer = offers[0] - } else { - offer = offers[0] - } - - carFile := dh.t.TempDir() + string(os.PathSeparator) + "ret-car-" + root.String() - - caddr, err := dh.client.WalletDefaultAddress(ctx) - require.NoError(dh.t, err) - - updatesCtx, cancel := context.WithCancel(ctx) - updates, err := dh.client.ClientGetRetrievalUpdates(updatesCtx) - require.NoError(dh.t, err) - - order := makeOrder(offer, caddr) - - retrievalRes, err := dh.client.ClientRetrieve(ctx, order) - require.NoError(dh.t, err) -consumeEvents: - for { - var evt api.RetrievalInfo - select { - case <-updatesCtx.Done(): - dh.t.Fatal("Retrieval Timed Out") - case evt = <-updates: - if evt.ID != retrievalRes.DealID { - continue - } - } - switch evt.Status { - case retrievalmarket.DealStatusCompleted: - break consumeEvents - case retrievalmarket.DealStatusRejected: - dh.t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message) - case - retrievalmarket.DealStatusDealNotFound, - retrievalmarket.DealStatusErrored: - dh.t.Fatalf("Retrieval Error: %s", evt.Message) - } - } - cancel() - - if order.RemoteStore != nil { - // if we're retrieving into a remote store, skip export - return "" - } - - require.NoError(dh.t, dh.client.ClientExport(ctx, - api.ExportRef{ - Root: root, - DealID: retrievalRes.DealID, - }, - api.FileRef{ - Path: carFile, - IsCAR: carExport, - })) - - ret := carFile - - return ret -} - -func (dh *DealHarness) 
ExtractFileFromCAR(ctx context.Context, file *os.File) string { - bserv := dstest.Bserv() - ch, err := car.LoadCar(ctx, bserv.Blockstore(), file) - require.NoError(dh.t, err) - - blk, err := bserv.GetBlock(ctx, ch.Roots[0]) - require.NoError(dh.t, err) - - reg := ipld.Registry{} - reg.Register(cid.DagProtobuf, dag.DecodeProtobufBlock) - reg.Register(cid.DagCBOR, ipldcbor.DecodeBlock) - reg.Register(cid.Raw, dag.DecodeRawBlock) - - nd, err := reg.Decode(blk) - require.NoError(dh.t, err) - - dserv := dag.NewDAGService(bserv) - - fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) - require.NoError(dh.t, err) - - tmpfile := dh.t.TempDir() + string(os.PathSeparator) + "file-in-car" + nd.Cid().String() - - err = files.WriteTo(fil, tmpfile) - require.NoError(dh.t, err) - - return tmpfile -} - -type RunConcurrentDealsOpts struct { - N int - FastRetrieval bool - CarExport bool - StartEpoch abi.ChainEpoch - UseCARFileForStorageDeal bool - IndexProvider *shared_testutil.MockIndexProvider -} - -func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) { - ctx := context.Background() - errgrp, _ := errgroup.WithContext(context.Background()) - for i := 0; i < opts.N; i++ { - i := i - errgrp.Go(func() (err error) { - defer dh.t.Logf("finished concurrent deal %d/%d", i, opts.N) - defer func() { - // This is necessary because golang can't deal with test - // failures being reported from children goroutines ¯\_(ツ)_/¯ - if r := recover(); r != nil { - err = fmt.Errorf("deal failed: %s", r) - } - }() - - dh.t.Logf("making storage deal %d/%d", i, opts.N) - - deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{ - Rseed: 5 + i, - FastRet: opts.FastRetrieval, - StartEpoch: opts.StartEpoch, - UseCARFileForStorageDeal: opts.UseCARFileForStorageDeal, - }) - - // Check that the storage provider announced the deal to indexers - if opts.IndexProvider != nil { - notifs := opts.IndexProvider.GetNotifs() - _, ok := notifs[string(deal.Bytes())] - 
require.True(dh.t, ok) - } - - dh.t.Logf("retrieving deal %d/%d", i, opts.N) - - outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport) - - if opts.CarExport { - f, err := os.Open(outPath) - require.NoError(dh.t, err) - actualFile := dh.ExtractFileFromCAR(ctx, f) - require.NoError(dh.t, f.Close()) - - AssertFilesEqual(dh.t, inPath, actualFile) - } else { - AssertFilesEqual(dh.t, inPath, outPath) - } - - return nil - }) - } - require.NoError(dh.t, errgrp.Wait()) -} diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index d635f98d4c1..d8f6e7f91a0 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -20,7 +20,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" @@ -46,16 +45,11 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet/key" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/rpc" - "github.com/filecoin-project/lotus/cmd/curio/tasks" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker" "github.com/filecoin-project/lotus/gateway" "github.com/filecoin-project/lotus/genesis" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/markets/idxprov" - "github.com/filecoin-project/lotus/markets/idxprov/idxprov_test" lotusminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/config" @@ -124,17 +118,17 @@ type Ensemble struct { options *ensembleOpts inactive struct { - fullnodes []*TestFullNode - providernodes []*TestProviderNode - miners []*TestMiner - workers []*TestWorker + 
fullnodes []*TestFullNode + miners []*TestMiner + unmanagedMiners []*TestUnmanagedMiner + workers []*TestWorker } active struct { - fullnodes []*TestFullNode - providernodes []*TestProviderNode - miners []*TestMiner - workers []*TestWorker - bms map[*TestMiner]*BlockMiner + fullnodes []*TestFullNode + miners []*TestMiner + unmanagedMiners []*TestUnmanagedMiner + workers []*TestWorker + bms map[*TestMiner]*BlockMiner } genesis struct { version network.Version @@ -227,20 +221,6 @@ func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble { return n } -// FullNode enrolls a new Provider node. -func (n *Ensemble) Provider(lp *TestProviderNode, opts ...NodeOpt) *Ensemble { - options := DefaultNodeOpts - for _, o := range opts { - err := o(&options) - require.NoError(n.t, err) - } - - *lp = TestProviderNode{t: n.t, options: options, Deps: &deps.Deps{}} - - n.inactive.providernodes = append(n.inactive.providernodes, lp) - return n -} - // Miner enrolls a new miner, using the provided full node for chain // interactions. func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble { @@ -261,9 +241,7 @@ func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts .. 
tdir, err := os.MkdirTemp("", "preseal-memgen") require.NoError(n.t, err) - minerCnt := len(n.inactive.miners) + len(n.active.miners) - - actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt)) + actorAddr, err := address.NewIDAddress(genesis2.MinerStart + n.minerCount()) require.NoError(n.t, err) if options.mainMiner != nil { @@ -335,12 +313,25 @@ func (n *Ensemble) AddInactiveMiner(m *TestMiner) { n.inactive.miners = append(n.inactive.miners, m) } +func (n *Ensemble) AddInactiveUnmanagedMiner(m *TestUnmanagedMiner) { + n.inactive.unmanagedMiners = append(n.inactive.unmanagedMiners, m) +} + func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble { n.MinerEnroll(minerNode, full, opts...) n.AddInactiveMiner(minerNode) return n } +func (n *Ensemble) UnmanagedMiner(full *TestFullNode, opts ...NodeOpt) (*TestUnmanagedMiner, *Ensemble) { + actorAddr, err := address.NewIDAddress(genesis2.MinerStart + n.minerCount()) + require.NoError(n.t, err) + + minerNode := NewTestUnmanagedMiner(n.t, full, actorAddr, opts...) + n.AddInactiveUnmanagedMiner(minerNode) + return minerNode, n +} + // Worker enrolls a new worker, using the provided full node for chain // interactions. 
func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble { @@ -623,12 +614,10 @@ func (n *Ensemble) Start() *Ensemble { n.t.Fatalf("invalid config from repo, got: %T", c) } cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String() - cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets) cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining) cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing) cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage) cfg.Subsystems.EnableSectorIndexDB = m.options.subsystems.Has(SHarmony) - cfg.Dealmaking.MaxStagingDealsBytes = m.options.maxStagingDealsBytes if m.options.mainMiner != nil { token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions) @@ -714,7 +703,7 @@ func (n *Ensemble) Start() *Ensemble { m.FullNode = &minerCopy opts := []node.Option{ - node.StorageMiner(&m.StorageMiner, cfg.Subsystems), + node.StorageMiner(&m.StorageMiner), node.Base(), node.Repo(r), node.Test(), @@ -757,13 +746,6 @@ func (n *Ensemble) Start() *Ensemble { } }), } - - if m.options.subsystems.Has(SMarkets) { - opts = append(opts, - node.Override(new(idxprov.MeshCreator), idxprov_test.NewNoopMeshCreator), - ) - } - // append any node builder options. opts = append(opts, m.options.extraNodeOpts...) @@ -836,6 +818,79 @@ func (n *Ensemble) Start() *Ensemble { // to active, so clear the slice. n.inactive.miners = n.inactive.miners[:0] + // Create all inactive manual miners. 
+ for _, m := range n.inactive.unmanagedMiners { + proofType, err := miner.WindowPoStProofTypeFromSectorSize(m.options.sectorSize, n.genesis.version) + require.NoError(n.t, err) + + params, aerr := actors.SerializeParams(&power3.CreateMinerParams{ + Owner: m.OwnerKey.Address, + Worker: m.OwnerKey.Address, + WindowPoStProofType: proofType, + Peer: abi.PeerID(m.Libp2p.PeerID), + }) + require.NoError(n.t, aerr) + + createStorageMinerMsg := &types.Message{ + From: m.OwnerKey.Address, + To: power.Address, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{ + MsgUuid: uuid.New(), + }) + require.NoError(n.t, err) + + mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(n.t, err) + require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) + + var retval power3.CreateMinerReturn + err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)) + require.NoError(n.t, err, "failed to create miner") + + m.ActorAddr = retval.IDAddress + + has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address) + require.NoError(n.t, err) + + // Only import the owner's full key into our companion full node, if we + // don't have it still. 
+ if !has { + _, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo) + require.NoError(n.t, err) + } + + enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) + require.NoError(n.t, err) + + msg := &types.Message{ + From: m.OwnerKey.Address, + To: m.ActorAddr, + Method: builtin.MethodsMiner.ChangePeerID, + Params: enc, + Value: types.NewInt(0), + } + + _, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{ + MsgUuid: uuid.New(), + }) + require.NoError(n.t, err2) + + minerCopy := *m.FullNode + minerCopy.FullNode = modules.MakeUuidWrapper(minerCopy.FullNode) + m.FullNode = &minerCopy + + n.active.unmanagedMiners = append(n.active.unmanagedMiners, m) + } + + // If we are here, we have processed all inactive manual miners and moved them + // to active, so clear the slice. + n.inactive.unmanagedMiners = n.inactive.unmanagedMiners[:0] + // --------------------- // WORKERS // --------------------- @@ -902,28 +957,6 @@ func (n *Ensemble) Start() *Ensemble { // to active, so clear the slice. n.inactive.workers = n.inactive.workers[:0] - for _, p := range n.inactive.providernodes { - - // TODO setup config with options - err := p.Deps.PopulateRemainingDeps(context.Background(), &cli.Context{}, false) - require.NoError(n.t, err) - - shutdownChan := make(chan struct{}) - taskEngine, err := tasks.StartTasks(ctx, p.Deps) - if err != nil { - return nil - } - defer taskEngine.GracefullyTerminate() - - err = rpc.ListenAndServe(ctx, p.Deps, shutdownChan) // Monitor for shutdown. 
- require.NoError(n.t, err) - finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, - //node.ShutdownHandler{Component: "provider", StopFunc: stop}, - - <-finishCh - - n.active.providernodes = append(n.active.providernodes, p) - } // --------------------- // MISC // --------------------- @@ -958,15 +991,6 @@ func (n *Ensemble) Start() *Ensemble { // InterconnectAll connects all miners and full nodes to one another. func (n *Ensemble) InterconnectAll() *Ensemble { - // connect full nodes to miners. - for _, from := range n.active.fullnodes { - for _, to := range n.active.miners { - // []*TestMiner to []api.CommonAPI type coercion not possible - // so cannot use variadic form. - n.Connect(from, to) - } - } - // connect full nodes between each other, skipping ourselves. last := len(n.active.fullnodes) - 1 for i, from := range n.active.fullnodes { @@ -1065,6 +1089,10 @@ func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) [] return bms } +func (n *Ensemble) minerCount() uint64 { + return uint64(len(n.inactive.miners) + len(n.active.miners) + len(n.inactive.unmanagedMiners) + len(n.active.unmanagedMiners)) +} + func (n *Ensemble) generateGenesis() *genesis.Template { var verifRoot = gen.DefaultVerifregRootkeyActor if k := n.options.verifiedRoot.key; k != nil { diff --git a/itests/kit/ensemble_opts_nv.go b/itests/kit/ensemble_opts_nv.go index 18b531e13b1..03239efe95a 100644 --- a/itests/kit/ensemble_opts_nv.go +++ b/itests/kit/ensemble_opts_nv.go @@ -35,12 +35,12 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt { }) /* inline-gen start */ return UpgradeSchedule(stmgr.Upgrade{ - Network: network.Version21, + Network: network.Version22, Height: -1, }, stmgr.Upgrade{ - Network: network.Version22, + Network: network.Version23, Height: upgradeHeight, - Migration: filcns.UpgradeActorsV13, + Migration: filcns.UpgradeActorsV14, }) /* inline-gen end */ } diff --git 
a/itests/kit/ensemble_presets.go b/itests/kit/ensemble_presets.go index 68b85fde025..c3c17d4d96d 100644 --- a/itests/kit/ensemble_presets.go +++ b/itests/kit/ensemble_presets.go @@ -2,7 +2,6 @@ package kit import ( "testing" - "time" ) // EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner. @@ -37,29 +36,6 @@ func EnsembleWorker(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMine return &full, &miner, &worker, ens } -func EnsembleWithMinerAndMarketNodes(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) { - eopts, nopts := siftOptions(t, opts) - - var ( - fullnode TestFullNode - main, market TestMiner - ) - - mainNodeOpts := []NodeOpt{WithSubsystems(SSealing, SSectorStorage, SMining), DisableLibp2p()} - mainNodeOpts = append(mainNodeOpts, nopts...) - - blockTime := 100 * time.Millisecond - ens := NewEnsemble(t, eopts...).FullNode(&fullnode, nopts...).Miner(&main, &fullnode, mainNodeOpts...).Start() - ens.BeginMining(blockTime) - - marketNodeOpts := []NodeOpt{OwnerAddr(fullnode.DefaultKey), MainMiner(&main), WithSubsystems(SMarkets)} - marketNodeOpts = append(marketNodeOpts, nopts...) - - ens.Miner(&market, &fullnode, marketNodeOpts...).Start().Connect(market, fullnode) - - return &fullnode, &main, &market, ens -} - // EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner. // It does not interconnect nodes nor does it begin mining. // @@ -101,21 +77,6 @@ func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMine return &full, &one, &two, ens } -// EnsembleProvider creates and starts an Ensemble with a single full node and a single provider. -// It does not interconnect nodes nor does it begin mining. 
-func EnsembleProvider(t *testing.T, opts ...interface{}) (*TestFullNode, *TestProviderNode, *Ensemble) { - opts = append(opts, WithAllSubsystems()) - - eopts, nopts := siftOptions(t, opts) - - var ( - full TestFullNode - provider TestProviderNode - ) - ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Provider(&provider, nopts...).Start() - return &full, &provider, ens -} - - func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) { for _, v := range opts { switch o := v.(type) { diff --git a/itests/kit/evm.go b/itests/kit/evm.go index 0d7af25782b..865d5c2f2b0 100644 --- a/itests/kit/evm.go +++ b/itests/kit/evm.go @@ -44,6 +44,49 @@ func (f *TestFullNode) EVM() *EVM { return &EVM{f} } +// SignLegacyEIP155Transaction signs a legacy EIP-155 Ethereum transaction in place with the supplied private key. +func (e *EVM) SignLegacyEIP155Transaction(tx *ethtypes.EthLegacy155TxArgs, privKey []byte, chainID big.Int) { + preimage, err := tx.ToRlpUnsignedMsg() + require.NoError(e.t, err) + + // sign the RLP payload + signature, err := sigs.Sign(crypto.SigTypeDelegated, privKey, preimage) + require.NoError(e.t, err) + + signature.Data = append([]byte{ethtypes.EthLegacy155TxSignaturePrefix}, signature.Data...) + + chainIdMul := big.Mul(chainID, big.NewInt(2)) + vVal := big.Add(chainIdMul, big.NewIntUnsigned(35)) + + switch signature.Data[len(signature.Data)-1] { + case 0: + vVal = big.Add(vVal, big.NewInt(0)) + case 1: + vVal = big.Add(vVal, big.NewInt(1)) + } + + signature.Data = append(signature.Data[:65], vVal.Int.Bytes()...) + + err = tx.InitialiseSignature(*signature) + require.NoError(e.t, err) +} + +// SignLegacyHomesteadTransaction signs a legacy Homestead Ethereum transaction in place with the supplied private key. 
+func (e *EVM) SignLegacyHomesteadTransaction(tx *ethtypes.EthLegacyHomesteadTxArgs, privKey []byte) { + preimage, err := tx.ToRlpUnsignedMsg() + require.NoError(e.t, err) + + // sign the RLP payload + signature, err := sigs.Sign(crypto.SigTypeDelegated, privKey, preimage) + require.NoError(e.t, err) + + signature.Data = append([]byte{ethtypes.EthLegacyHomesteadTxSignaturePrefix}, signature.Data...) + signature.Data[len(signature.Data)-1] += 27 + + err = tx.InitialiseSignature(*signature) + require.NoError(e.t, err) +} + func (e *EVM) DeployContractWithValue(ctx context.Context, sender address.Address, bytecode []byte, value big.Int) eam.CreateReturn { require := require.New(e.t) @@ -208,7 +251,7 @@ func (e *EVM) AssertAddressBalanceConsistent(ctx context.Context, addr address.A } // SignTransaction signs an Ethereum transaction in place with the supplied private key. -func (e *EVM) SignTransaction(tx *ethtypes.EthTxArgs, privKey []byte) { +func (e *EVM) SignTransaction(tx *ethtypes.Eth1559TxArgs, privKey []byte) { preimage, err := tx.ToRlpUnsignedMsg() require.NoError(e.t, err) @@ -216,16 +259,12 @@ func (e *EVM) SignTransaction(tx *ethtypes.EthTxArgs, privKey []byte) { signature, err := sigs.Sign(crypto.SigTypeDelegated, privKey, preimage) require.NoError(e.t, err) - r, s, v, err := ethtypes.RecoverSignature(*signature) + err = tx.InitialiseSignature(*signature) require.NoError(e.t, err) - - tx.V = big.Int(v) - tx.R = big.Int(r) - tx.S = big.Int(s) } // SubmitTransaction submits the transaction via the Eth endpoint. 
-func (e *EVM) SubmitTransaction(ctx context.Context, tx *ethtypes.EthTxArgs) ethtypes.EthHash { +func (e *EVM) SubmitTransaction(ctx context.Context, tx ethtypes.EthTransaction) ethtypes.EthHash { signed, err := tx.ToRlpSignedMsg() require.NoError(e.t, err) diff --git a/itests/kit/node_full.go b/itests/kit/node_full.go index c36f05a7592..c71667a99d0 100644 --- a/itests/kit/node_full.go +++ b/itests/kit/node_full.go @@ -12,6 +12,7 @@ import ( "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -22,7 +23,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet/key" cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" "github.com/filecoin-project/lotus/gateway" "github.com/filecoin-project/lotus/node" ) @@ -55,17 +55,6 @@ type TestFullNode struct { options nodeOpts } -// TestProviderNode represents a Provider node enrolled in an Ensemble. -type TestProviderNode struct { - v1api.CurioStruct - - t *testing.T - - *deps.Deps - - options nodeOpts -} - func MergeFullNodes(fullNodes []*TestFullNode) *TestFullNode { var wrappedFullNode TestFullNode var fns api.FullNodeStruct @@ -86,22 +75,6 @@ func (f TestFullNode) Shutdown(ctx context.Context) error { return f.Stop(ctx) } -func (f *TestFullNode) ClientImportCARFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, carv1FilePath string, origFilePath string) { - carv1FilePath, origFilePath = CreateRandomCARv1(f.t, rseed, size) - res, err := f.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true}) - require.NoError(f.t, err) - return res, carv1FilePath, origFilePath -} - -// CreateImportFile creates a random file with the specified seed and size, and -// imports it into the full node. 
-func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) { - path = CreateRandomFile(f.t, rseed, size) - res, err := f.ClientImport(ctx, api.FileRef{Path: path}) - require.NoError(f.t, err) - return res, path -} - // WaitTillChain waits until a specified chain condition is met. It returns // the first tipset where the condition is met. func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *types.TipSet { @@ -125,6 +98,30 @@ func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) * return nil } +// WaitTillChainOrError waits until a specified chain condition is met. It returns +// the first tipset where the condition is met. +func (f *TestFullNode) WaitTillChainOrError(ctx context.Context, pred ChainPredicate) (*types.TipSet, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + heads, err := f.ChainNotify(ctx) + if err != nil { + return nil, err + } + + for chg := range heads { + for _, c := range chg { + if c.Type != "apply" { + continue + } + if ts := c.Val; pred(ts) { + return ts, nil + } + } + } + return nil, xerrors.New("chain condition not met") +} + func (f *TestFullNode) WaitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, maddr address.Address) { for { active, err := f.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) diff --git a/itests/kit/node_miner.go b/itests/kit/node_miner.go index ee2ee3eaae2..2e6a2b80a20 100644 --- a/itests/kit/node_miner.go +++ b/itests/kit/node_miner.go @@ -32,8 +32,7 @@ import ( type MinerSubsystem int const ( - SMarkets MinerSubsystem = 1 << iota - SMining + SMining MinerSubsystem = 1 << iota SSealing SSectorStorage diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index 1f4f9f6a4db..89c04b1c766 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -43,7 +43,6 @@ type nodeOpts struct { disableLibp2p bool optBuilders []OptBuilder sectorSize abi.SectorSize - 
maxStagingDealsBytes int64 minerNoLocalSealing bool // use worker minerAssigner string disallowRemoteFinalize bool @@ -84,7 +83,6 @@ type NodeOpt func(opts *nodeOpts) error func WithAllSubsystems() NodeOpt { return func(opts *nodeOpts) error { - opts.subsystems = opts.subsystems.Add(SMarkets) opts.subsystems = opts.subsystems.Add(SMining) opts.subsystems = opts.subsystems.Add(SSealing) opts.subsystems = opts.subsystems.Add(SSectorStorage) @@ -108,14 +106,6 @@ func WithSubsystems(systems ...MinerSubsystem) NodeOpt { return nil } } - -func WithMaxStagingDealsBytes(size int64) NodeOpt { - return func(opts *nodeOpts) error { - opts.maxStagingDealsBytes = size - return nil - } -} - func WithNoLocalSealing(nope bool) NodeOpt { return func(opts *nodeOpts) error { opts.minerNoLocalSealing = nope diff --git a/itests/kit/node_unmanaged.go b/itests/kit/node_unmanaged.go new file mode 100644 index 00000000000..562146f56a4 --- /dev/null +++ b/itests/kit/node_unmanaged.go @@ -0,0 +1,1072 @@ +package kit + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/ipfs/go-cid" + libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + cbg "github.com/whyrusleeping/cbor-gen" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin" + miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/proof" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet/key" +) + +// TestUnmanagedMiner is a miner 
that's not managed by the storage/infrastructure, all tasks must be manually executed, managed and scheduled by the test or test kit. +// Note: `TestUnmanagedMiner` is not thread safe and assumes linear access of it's methods +type TestUnmanagedMiner struct { + t *testing.T + options nodeOpts + + cacheDir string + unsealedSectorDir string + sealedSectorDir string + currentSectorNum abi.SectorNumber + + cacheDirPaths map[abi.SectorNumber]string + unsealedSectorPaths map[abi.SectorNumber]string + sealedSectorPaths map[abi.SectorNumber]string + sealedCids map[abi.SectorNumber]cid.Cid + unsealedCids map[abi.SectorNumber]cid.Cid + sealTickets map[abi.SectorNumber]abi.SealRandomness + + proofType map[abi.SectorNumber]abi.RegisteredSealProof + + ActorAddr address.Address + OwnerKey *key.Key + FullNode *TestFullNode + Libp2p struct { + PeerID peer.ID + PrivKey libp2pcrypto.PrivKey + } +} + +type WindowPostResp struct { + Posted bool + Error error +} + +func NewTestUnmanagedMiner(t *testing.T, full *TestFullNode, actorAddr address.Address, opts ...NodeOpt) *TestUnmanagedMiner { + require.NotNil(t, full, "full node required when instantiating miner") + + options := DefaultNodeOpts + for _, o := range opts { + err := o(&options) + require.NoError(t, err) + } + + privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + + require.NotNil(t, options.ownerKey, "owner key is required for initializing a miner") + + peerId, err := peer.IDFromPrivateKey(privkey) + require.NoError(t, err) + tmpDir := t.TempDir() + + cacheDir := filepath.Join(tmpDir, fmt.Sprintf("cache-%s", actorAddr)) + unsealedSectorDir := filepath.Join(tmpDir, fmt.Sprintf("unsealed-%s", actorAddr)) + sealedSectorDir := filepath.Join(tmpDir, fmt.Sprintf("sealed-%s", actorAddr)) + + _ = os.Mkdir(cacheDir, 0755) + _ = os.Mkdir(unsealedSectorDir, 0755) + _ = os.Mkdir(sealedSectorDir, 0755) + + tm := TestUnmanagedMiner{ + t: t, + options: options, + cacheDir: cacheDir, + 
unsealedSectorDir: unsealedSectorDir, + sealedSectorDir: sealedSectorDir, + + unsealedSectorPaths: make(map[abi.SectorNumber]string), + cacheDirPaths: make(map[abi.SectorNumber]string), + sealedSectorPaths: make(map[abi.SectorNumber]string), + sealedCids: make(map[abi.SectorNumber]cid.Cid), + unsealedCids: make(map[abi.SectorNumber]cid.Cid), + sealTickets: make(map[abi.SectorNumber]abi.SealRandomness), + + ActorAddr: actorAddr, + OwnerKey: options.ownerKey, + FullNode: full, + currentSectorNum: 101, + proofType: make(map[abi.SectorNumber]abi.RegisteredSealProof), + } + tm.Libp2p.PeerID = peerId + tm.Libp2p.PrivKey = privkey + + return &tm +} + +func (tm *TestUnmanagedMiner) AssertNoPower(ctx context.Context) { + p := tm.CurrentPower(ctx) + tm.t.Logf("Miner %s RBP: %v, QaP: %v", tm.ActorAddr, p.MinerPower.QualityAdjPower.String(), p.MinerPower.RawBytePower.String()) + require.True(tm.t, p.MinerPower.RawBytePower.IsZero()) +} + +func (tm *TestUnmanagedMiner) CurrentPower(ctx context.Context) *api.MinerPower { + head, err := tm.FullNode.ChainHead(ctx) + require.NoError(tm.t, err) + + p, err := tm.FullNode.StateMinerPower(ctx, tm.ActorAddr, head.Key()) + require.NoError(tm.t, err) + + return p +} + +func (tm *TestUnmanagedMiner) AssertPower(ctx context.Context, raw uint64, qa uint64) { + req := require.New(tm.t) + p := tm.CurrentPower(ctx) + tm.t.Logf("Miner %s RBP: %v, QaP: %v", p.MinerPower.QualityAdjPower.String(), tm.ActorAddr, p.MinerPower.RawBytePower.String()) + req.Equal(raw, p.MinerPower.RawBytePower.Uint64()) + req.Equal(qa, p.MinerPower.QualityAdjPower.Uint64()) +} + +func (tm *TestUnmanagedMiner) mkAndSavePiecesToOnboard(_ context.Context, sectorNumber abi.SectorNumber, pt abi.RegisteredSealProof) []abi.PieceInfo { + paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize) + unpaddedPieceSize := paddedPieceSize.Unpadded() + + // Generate random bytes for the piece + randomBytes := make([]byte, unpaddedPieceSize) + _, err := io.ReadFull(rand.Reader, 
randomBytes) + require.NoError(tm.t, err) + + // Create a temporary file for the first piece + pieceFileA := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize)) + + // Generate the piece CID from the file + pieceCIDA, err := ffi.GeneratePieceCIDFromFile(pt, pieceFileA, unpaddedPieceSize) + require.NoError(tm.t, err) + + // Reset file offset to the beginning after CID generation + _, err = pieceFileA.Seek(0, io.SeekStart) + require.NoError(tm.t, err) + + unsealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0) + defer func() { + _ = unsealedSectorFile.Close() + }() + + // Write the piece to the staged sector file without alignment + writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(pt, pieceFileA, unpaddedPieceSize, unsealedSectorFile) + require.NoError(tm.t, err) + require.EqualValues(tm.t, unpaddedPieceSize, writtenBytes) + require.True(tm.t, pieceCID.Equals(pieceCIDA)) + + // Create a struct for the piece info + publicPieces := []abi.PieceInfo{{ + Size: paddedPieceSize, + PieceCID: pieceCIDA, + }} + + // Create a temporary file for the sealed sector + sealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0) + defer func() { + _ = sealedSectorFile.Close() + }() + + // Update paths for the sector + tm.sealedSectorPaths[sectorNumber] = sealedSectorFile.Name() + tm.unsealedSectorPaths[sectorNumber] = unsealedSectorFile.Name() + tm.cacheDirPaths[sectorNumber] = filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sectorNumber)) + + // Ensure the cache directory exists + _ = os.Mkdir(tm.cacheDirPaths[sectorNumber], 0755) + + return publicPieces +} + +func (tm *TestUnmanagedMiner) makeAndSaveCCSector(_ context.Context, sectorNumber abi.SectorNumber) { + requirements := require.New(tm.t) + + // Create cache directory + cacheDirPath := filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sectorNumber)) + requirements.NoError(os.Mkdir(cacheDirPath, 0755)) + tm.t.Logf("Miner %s: Sector %d: created cache directory at %s", 
tm.ActorAddr, sectorNumber, cacheDirPath) + + // Define paths for unsealed and sealed sectors + unsealedSectorPath := filepath.Join(tm.unsealedSectorDir, fmt.Sprintf("%d", sectorNumber)) + sealedSectorPath := filepath.Join(tm.sealedSectorDir, fmt.Sprintf("%d", sectorNumber)) + unsealedSize := abi.PaddedPieceSize(tm.options.sectorSize).Unpadded() + + // Write unsealed sector file + requirements.NoError(os.WriteFile(unsealedSectorPath, make([]byte, unsealedSize), 0644)) + tm.t.Logf("Miner %s: Sector %d: wrote unsealed CC sector to %s", tm.ActorAddr, sectorNumber, unsealedSectorPath) + + // Write sealed sector file + requirements.NoError(os.WriteFile(sealedSectorPath, make([]byte, tm.options.sectorSize), 0644)) + tm.t.Logf("Miner %s: Sector %d: wrote sealed CC sector to %s", tm.ActorAddr, sectorNumber, sealedSectorPath) + + // Update paths in the struct + tm.unsealedSectorPaths[sectorNumber] = unsealedSectorPath + tm.sealedSectorPaths[sectorNumber] = sealedSectorPath + tm.cacheDirPaths[sectorNumber] = cacheDirPath +} + +func (tm *TestUnmanagedMiner) OnboardSectorWithPiecesAndRealProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, + context.CancelFunc) { + req := require.New(tm.t) + sectorNumber := tm.currentSectorNum + tm.currentSectorNum++ + + // Step 1: Wait for the pre-commitseal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality) + preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber) + + // Step 2: Build a sector with non 0 Pieces that we want to onboard + pieces := tm.mkAndSavePiecesToOnboard(ctx, sectorNumber, proofType) + + // Step 3: Generate a Pre-Commit for the CC sector -> this persists the proof on the `TestUnmanagedMiner` Miner State + tm.generatePreCommit(ctx, sectorNumber, preCommitSealRand, proofType, pieces) + + // Step 4 : Submit the Pre-Commit to the network + unsealedCid := tm.unsealedCids[sectorNumber] + r, err := 
tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{ + Sectors: []miner14.SectorPreCommitInfo{{ + Expiration: 2880 * 300, + SectorNumber: sectorNumber, + SealProof: TestSpt, + SealedCID: tm.sealedCids[sectorNumber], + SealRandEpoch: preCommitSealRand, + UnsealedCid: &unsealedCid, + }}, + }, 1, builtin.MethodsMiner.PreCommitSectorBatch2) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + // Step 5: Generate a ProveCommit for the CC sector + waitSeedRandomness := tm.proveCommitWaitSeed(ctx, sectorNumber) + + proveCommit := tm.generateProveCommit(ctx, sectorNumber, proofType, waitSeedRandomness, pieces) + + // Step 6: Submit the ProveCommit to the network + tm.t.Log("Submitting ProveCommitSector ...") + + var manifest []miner14.PieceActivationManifest + for _, piece := range pieces { + manifest = append(manifest, miner14.PieceActivationManifest{ + CID: piece.PieceCID, + Size: piece.Size, + }) + } + + r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{ + SectorActivations: []miner14.SectorActivationManifest{{SectorNumber: sectorNumber, Pieces: manifest}}, + SectorProofs: [][]byte{proveCommit}, + RequireActivationSuccess: true, + }, 1, builtin.MethodsMiner.ProveCommitSectors3) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + tm.proofType[sectorNumber] = proofType + + respCh := make(chan WindowPostResp, 1) + + wdCtx, cancelF := context.WithCancel(ctx) + go tm.wdPostLoop(wdCtx, sectorNumber, respCh, false, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber]) + + return sectorNumber, respCh, cancelF +} + +func (tm *TestUnmanagedMiner) OnboardSectorWithPiecesAndMockProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, + context.CancelFunc) { + req := require.New(tm.t) + sectorNumber := tm.currentSectorNum + tm.currentSectorNum++ + + // Step 1: Wait for the pre-commitseal randomness to be available (we can only draw seal 
randomness from tipsets that have already achieved finality) + preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber) + + // Step 2: Build a sector with non 0 Pieces that we want to onboard + pieces := []abi.PieceInfo{{ + Size: abi.PaddedPieceSize(tm.options.sectorSize), + PieceCID: cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha"), + }} + + // Step 3: Generate a Pre-Commit for the CC sector -> this persists the proof on the `TestUnmanagedMiner` Miner State + tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") + tm.unsealedCids[sectorNumber] = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha") + + // Step 4 : Submit the Pre-Commit to the network + unsealedCid := tm.unsealedCids[sectorNumber] + r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{ + Sectors: []miner14.SectorPreCommitInfo{{ + Expiration: 2880 * 300, + SectorNumber: sectorNumber, + SealProof: TestSpt, + SealedCID: tm.sealedCids[sectorNumber], + SealRandEpoch: preCommitSealRand, + UnsealedCid: &unsealedCid, + }}, + }, 1, builtin.MethodsMiner.PreCommitSectorBatch2) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + // Step 5: Generate a ProveCommit for the CC sector + _ = tm.proveCommitWaitSeed(ctx, sectorNumber) + sectorProof := []byte{0xde, 0xad, 0xbe, 0xef} + + // Step 6: Submit the ProveCommit to the network + tm.t.Log("Submitting ProveCommitSector ...") + + var manifest []miner14.PieceActivationManifest + for _, piece := range pieces { + manifest = append(manifest, miner14.PieceActivationManifest{ + CID: piece.PieceCID, + Size: piece.Size, + }) + } + + r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{ + SectorActivations: []miner14.SectorActivationManifest{{SectorNumber: sectorNumber, Pieces: manifest}}, + SectorProofs: [][]byte{sectorProof}, + RequireActivationSuccess: true, + }, 1, 
builtin.MethodsMiner.ProveCommitSectors3) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + tm.proofType[sectorNumber] = proofType + + respCh := make(chan WindowPostResp, 1) + + wdCtx, cancelF := context.WithCancel(ctx) + go tm.wdPostLoop(wdCtx, sectorNumber, respCh, true, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber]) + + return sectorNumber, respCh, cancelF +} + +func (tm *TestUnmanagedMiner) mkStagedFileWithPieces(pt abi.RegisteredSealProof) ([]abi.PieceInfo, string) { + paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize) + unpaddedPieceSize := paddedPieceSize.Unpadded() + + // Generate random bytes for the piece + randomBytes := make([]byte, unpaddedPieceSize) + _, err := io.ReadFull(rand.Reader, randomBytes) + require.NoError(tm.t, err) + + // Create a temporary file for the first piece + pieceFileA := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize)) + + // Generate the piece CID from the file + pieceCIDA, err := ffi.GeneratePieceCIDFromFile(pt, pieceFileA, unpaddedPieceSize) + require.NoError(tm.t, err) + + // Reset file offset to the beginning after CID generation + _, err = pieceFileA.Seek(0, io.SeekStart) + require.NoError(tm.t, err) + + unsealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0) + defer func() { + _ = unsealedSectorFile.Close() + }() + + // Write the piece to the staged sector file without alignment + writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(pt, pieceFileA, unpaddedPieceSize, unsealedSectorFile) + require.NoError(tm.t, err) + require.EqualValues(tm.t, unpaddedPieceSize, writtenBytes) + require.True(tm.t, pieceCID.Equals(pieceCIDA)) + + // Create a struct for the piece info + publicPieces := []abi.PieceInfo{{ + Size: paddedPieceSize, + PieceCID: pieceCIDA, + }} + + return publicPieces, unsealedSectorFile.Name() +} + +func (tm *TestUnmanagedMiner) SnapDealWithRealProofs(ctx context.Context, proofType 
abi.RegisteredSealProof, sectorNumber abi.SectorNumber) { + // generate sector key + pieces, unsealedPath := tm.mkStagedFileWithPieces(proofType) + updateProofType := abi.SealProofInfos[proofType].UpdateProof + + s, err := os.Stat(tm.sealedSectorPaths[sectorNumber]) + require.NoError(tm.t, err) + + randomBytes := make([]byte, s.Size()) + _, err = io.ReadFull(rand.Reader, randomBytes) + require.NoError(tm.t, err) + + updatePath := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(s.Size())) + require.NoError(tm.t, updatePath.Close()) + updateDir := filepath.Join(tm.t.TempDir(), fmt.Sprintf("update-%d", sectorNumber)) + require.NoError(tm.t, os.MkdirAll(updateDir, 0700)) + + newSealed, newUnsealed, err := ffi.SectorUpdate.EncodeInto(updateProofType, updatePath.Name(), updateDir, + tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber], unsealedPath, pieces) + require.NoError(tm.t, err) + + vp, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, tm.sealedCids[sectorNumber], + newSealed, newUnsealed, updatePath.Name(), updateDir, tm.sealedSectorPaths[sectorNumber], + tm.cacheDirPaths[sectorNumber]) + require.NoError(tm.t, err) + + snapProof, err := ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, tm.sealedCids[sectorNumber], + newSealed, newUnsealed, vp) + require.NoError(tm.t, err) + tm.waitForMutableDeadline(ctx, sectorNumber) + + // submit proof + var manifest []miner14.PieceActivationManifest + for _, piece := range pieces { + manifest = append(manifest, miner14.PieceActivationManifest{ + CID: piece.PieceCID, + Size: piece.Size, + }) + } + + head, err := tm.FullNode.ChainHead(ctx) + require.NoError(tm.t, err) + + sl, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key()) + require.NoError(tm.t, err) + + params := &miner14.ProveReplicaUpdates3Params{ + SectorUpdates: []miner14.SectorUpdateManifest{ + { + Sector: sectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + 
NewSealedCID: newSealed, + Pieces: manifest, + }, + }, + SectorProofs: [][]byte{snapProof}, + UpdateProofsType: updateProofType, + RequireActivationSuccess: true, + RequireNotificationSuccess: false, + } + r, err := tm.submitMessage(ctx, params, 1, builtin.MethodsMiner.ProveReplicaUpdates3) + require.NoError(tm.t, err) + require.True(tm.t, r.Receipt.ExitCode.IsSuccess()) +} + +func (tm *TestUnmanagedMiner) waitForMutableDeadline(ctx context.Context, sectorNum abi.SectorNumber) { + ts, err := tm.FullNode.ChainHead(ctx) + require.NoError(tm.t, err) + + sl, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNum, ts.Key()) + require.NoError(tm.t, err) + + dlinfo, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, ts.Key()) + require.NoError(tm.t, err) + + sectorDeadlineOpen := sl.Deadline == dlinfo.Index + sectorDeadlineNext := (dlinfo.Index+1)%dlinfo.WPoStPeriodDeadlines == sl.Deadline + immutable := sectorDeadlineOpen || sectorDeadlineNext + + // Sleep for immutable epochs + if immutable { + dlineEpochsRemaining := dlinfo.NextOpen() - ts.Height() + var targetEpoch abi.ChainEpoch + if sectorDeadlineOpen { + // sleep for remainder of deadline + targetEpoch = ts.Height() + dlineEpochsRemaining + } else { + // sleep for remainder of deadline and next one + targetEpoch = ts.Height() + dlineEpochsRemaining + dlinfo.WPoStChallengeWindow + } + _, err := tm.FullNode.WaitTillChainOrError(ctx, HeightAtLeast(targetEpoch+5)) + require.NoError(tm.t, err) + } +} + +func (tm *TestUnmanagedMiner) OnboardCCSectorWithMockProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, + context.CancelFunc) { + req := require.New(tm.t) + sectorNumber := tm.currentSectorNum + tm.currentSectorNum++ + + // Step 1: Wait for the pre-commitseal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality) + preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber) + + 
tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") + + // Step 4 : Submit the Pre-Commit to the network + r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{ + Sectors: []miner14.SectorPreCommitInfo{{ + Expiration: 2880 * 300, + SectorNumber: sectorNumber, + SealProof: TestSpt, + SealedCID: tm.sealedCids[sectorNumber], + SealRandEpoch: preCommitSealRand, + }}, + }, 1, builtin.MethodsMiner.PreCommitSectorBatch2) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + // Step 5: Generate a ProveCommit for the CC sector + _ = tm.proveCommitWaitSeed(ctx, sectorNumber) + sectorProof := []byte{0xde, 0xad, 0xbe, 0xef} + + // Step 6: Submit the ProveCommit to the network + tm.t.Log("Submitting ProveCommitSector ...") + + r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{ + SectorActivations: []miner14.SectorActivationManifest{{SectorNumber: sectorNumber}}, + SectorProofs: [][]byte{sectorProof}, + RequireActivationSuccess: true, + }, 0, builtin.MethodsMiner.ProveCommitSectors3) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + tm.proofType[sectorNumber] = proofType + + respCh := make(chan WindowPostResp, 1) + + wdCtx, cancelF := context.WithCancel(ctx) + go tm.wdPostLoop(wdCtx, sectorNumber, respCh, true, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber]) + + return sectorNumber, respCh, cancelF +} + +func (tm *TestUnmanagedMiner) OnboardCCSectorWithRealProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, + context.CancelFunc) { + req := require.New(tm.t) + sectorNumber := tm.currentSectorNum + tm.currentSectorNum++ + + // --------------------Create pre-commit for the CC sector -> we'll just pre-commit `sector size` worth of 0s for this CC sector + + // Step 1: Wait for the pre-commitseal randomness to be available (we can only draw seal randomness from 
tipsets that have already achieved finality) + preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber) + + // Step 2: Write empty bytes that we want to seal i.e. create our CC sector + tm.makeAndSaveCCSector(ctx, sectorNumber) + + // Step 3: Generate a Pre-Commit for the CC sector -> this persists the proof on the `TestUnmanagedMiner` Miner State + tm.generatePreCommit(ctx, sectorNumber, preCommitSealRand, proofType, []abi.PieceInfo{}) + + // Step 4 : Submit the Pre-Commit to the network + r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{ + Sectors: []miner14.SectorPreCommitInfo{{ + Expiration: 2880 * 300, + SectorNumber: sectorNumber, + SealProof: TestSpt, + SealedCID: tm.sealedCids[sectorNumber], + SealRandEpoch: preCommitSealRand, + }}, + }, 1, builtin.MethodsMiner.PreCommitSectorBatch2) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + // Step 5: Generate a ProveCommit for the CC sector + waitSeedRandomness := tm.proveCommitWaitSeed(ctx, sectorNumber) + + proveCommit := tm.generateProveCommit(ctx, sectorNumber, proofType, waitSeedRandomness, []abi.PieceInfo{}) + + // Step 6: Submit the ProveCommit to the network + tm.t.Log("Submitting ProveCommitSector ...") + + r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{ + SectorActivations: []miner14.SectorActivationManifest{{SectorNumber: sectorNumber}}, + SectorProofs: [][]byte{proveCommit}, + RequireActivationSuccess: true, + }, 0, builtin.MethodsMiner.ProveCommitSectors3) + req.NoError(err) + req.True(r.Receipt.ExitCode.IsSuccess()) + + tm.proofType[sectorNumber] = proofType + + respCh := make(chan WindowPostResp, 1) + + wdCtx, cancelF := context.WithCancel(ctx) + go tm.wdPostLoop(wdCtx, sectorNumber, respCh, false, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber]) + + return sectorNumber, respCh, cancelF +} + +func (tm *TestUnmanagedMiner) wdPostLoop(ctx context.Context, sectorNumber abi.SectorNumber, 
respCh chan WindowPostResp, withMockProofs bool, sealedCid cid.Cid, sealedPath, cacheDir string) { + go func() { + var firstPost bool + + writeRespF := func(respErr error) { + var send WindowPostResp + if respErr == nil { + if firstPost { + return // already reported on our first post, no error to report, don't send anything + } + send.Posted = true + firstPost = true + } else { + if ctx.Err() == nil { + tm.t.Logf("Sector %d: WindowPoSt submission failed: %s", sectorNumber, respErr) + } + send.Error = respErr + } + select { + case respCh <- send: + case <-ctx.Done(): + default: + } + } + + var postCount int + for ctx.Err() == nil { + currentEpoch, nextPost, err := tm.calculateNextPostEpoch(ctx, sectorNumber) + tm.t.Logf("Activating sector %d, next post %d, current epoch %d", sectorNumber, nextPost, currentEpoch) + if err != nil { + writeRespF(err) + return + } + + if nextPost > currentEpoch { + if _, err := tm.FullNode.WaitTillChainOrError(ctx, HeightAtLeast(nextPost)); err != nil { + writeRespF(err) + return + } + } + + err = tm.submitWindowPost(ctx, sectorNumber, withMockProofs, sealedCid, sealedPath, cacheDir) + writeRespF(err) // send an error, or first post, or nothing if no error and this isn't the first post + postCount++ + tm.t.Logf("Sector %d: WindowPoSt #%d submitted", sectorNumber, postCount) + } + }() +} + +func (tm *TestUnmanagedMiner) SubmitPostDispute(ctx context.Context, sectorNumber abi.SectorNumber) error { + tm.t.Logf("Miner %s: Starting dispute submission for sector %d", tm.ActorAddr, sectorNumber) + + head, err := tm.FullNode.ChainHead(ctx) + if err != nil { + return fmt.Errorf("MinerB(%s): failed to get chain head: %w", tm.ActorAddr, err) + } + + sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key()) + if err != nil { + return fmt.Errorf("MinerB(%s): failed to get sector partition for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, 
head.Key()) + if err != nil { + return fmt.Errorf("MinerB(%s): failed to get proving deadline for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + disputeEpoch := di.Close + 5 + tm.t.Logf("Miner %s: Sector %d - Waiting %d epochs until epoch %d to submit dispute", tm.ActorAddr, sectorNumber, disputeEpoch-head.Height(), disputeEpoch) + + tm.FullNode.WaitTillChain(ctx, HeightAtLeast(disputeEpoch)) + + tm.t.Logf("Miner %s: Sector %d - Disputing WindowedPoSt to confirm validity at epoch %d", tm.ActorAddr, sectorNumber, disputeEpoch) + + _, err = tm.submitMessage(ctx, &miner14.DisputeWindowedPoStParams{ + Deadline: sp.Deadline, + PoStIndex: 0, + }, 1, builtin.MethodsMiner.DisputeWindowedPoSt) + return err +} + +func (tm *TestUnmanagedMiner) submitWindowPost(ctx context.Context, sectorNumber abi.SectorNumber, withMockProofs bool, sealedCid cid.Cid, sealedPath, cacheDir string) error { + tm.t.Logf("Miner(%s): WindowPoST(%d): Running WindowPoSt ...\n", tm.ActorAddr, sectorNumber) + + head, err := tm.FullNode.ChainHead(ctx) + if err != nil { + return fmt.Errorf("Miner(%s): failed to get chain head: %w", tm.ActorAddr, err) + } + + sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key()) + if err != nil { + return fmt.Errorf("Miner(%s): failed to get sector partition for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key()) + if err != nil { + return fmt.Errorf("Miner(%s): failed to get proving deadline for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + tm.t.Logf("Miner(%s): WindowPoST(%d): SectorPartition: %+v, ProvingDeadline: %+v\n", tm.ActorAddr, sectorNumber, sp, di) + if di.Index != sp.Deadline { + return fmt.Errorf("Miner(%s): sector %d is not in the deadline %d, but %d", tm.ActorAddr, sectorNumber, sp.Deadline, di.Index) + } + + var proofBytes []byte + if withMockProofs { + proofBytes = []byte{0xde, 0xad, 0xbe, 0xef} + } else { + 
proofBytes, err = tm.generateWindowPost(ctx, sectorNumber, sealedCid, sealedPath, cacheDir) + if err != nil { + return fmt.Errorf("Miner(%s): failed to generate window post for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + } + + tm.t.Logf("Miner(%s): WindowedPoSt(%d) Submitting ...\n", tm.ActorAddr, sectorNumber) + + chainRandomnessEpoch := di.Challenge + chainRandomness, err := tm.FullNode.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_PoStChainCommit, chainRandomnessEpoch, + nil, head.Key()) + if err != nil { + return fmt.Errorf("Miner(%s): failed to get chain randomness for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + minerInfo, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, head.Key()) + if err != nil { + return fmt.Errorf("Miner(%s): failed to get miner info for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + r, err := tm.submitMessage(ctx, &miner14.SubmitWindowedPoStParams{ + ChainCommitEpoch: chainRandomnessEpoch, + ChainCommitRand: chainRandomness, + Deadline: sp.Deadline, + Partitions: []miner14.PoStPartition{{Index: sp.Partition}}, + Proofs: []proof.PoStProof{{PoStProof: minerInfo.WindowPoStProofType, ProofBytes: proofBytes}}, + }, 0, builtin.MethodsMiner.SubmitWindowedPoSt) + if err != nil { + return fmt.Errorf("Miner(%s): failed to submit window post for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + if !r.Receipt.ExitCode.IsSuccess() { + return fmt.Errorf("Miner(%s): submitting PoSt for sector %d failed: %s", tm.ActorAddr, sectorNumber, r.Receipt.ExitCode) + } + + tm.t.Logf("Miner(%s): WindowedPoSt(%d) Submitted ...\n", tm.ActorAddr, sectorNumber) + + return nil +} + +func (tm *TestUnmanagedMiner) generateWindowPost( + ctx context.Context, + sectorNumber abi.SectorNumber, + sealedCid cid.Cid, + sealedPath string, + cacheDir string, +) ([]byte, error) { + head, err := tm.FullNode.ChainHead(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get chain head: %w", err) + } + + minerInfo, 
err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, head.Key()) + if err != nil { + return nil, fmt.Errorf("failed to get miner info: %w", err) + } + + di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, types.EmptyTSK) + if err != nil { + return nil, fmt.Errorf("failed to get proving deadline: %w", err) + } + + minerAddrBytes := new(bytes.Buffer) + if err := tm.ActorAddr.MarshalCBOR(minerAddrBytes); err != nil { + return nil, fmt.Errorf("failed to marshal miner address: %w", err) + } + + rand, err := tm.FullNode.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, minerAddrBytes.Bytes(), head.Key()) + if err != nil { + return nil, fmt.Errorf("failed to get randomness: %w", err) + } + postRand := abi.PoStRandomness(rand) + postRand[31] &= 0x3f // make fr32 compatible + + privateSectorInfo := ffi.PrivateSectorInfo{ + SectorInfo: proof.SectorInfo{ + SealProof: tm.proofType[sectorNumber], + SectorNumber: sectorNumber, + SealedCID: sealedCid, + }, + CacheDirPath: cacheDir, + PoStProofType: minerInfo.WindowPoStProofType, + SealedSectorPath: sealedPath, + } + + actorIdNum, err := address.IDFromAddress(tm.ActorAddr) + if err != nil { + return nil, fmt.Errorf("failed to get actor ID: %w", err) + } + actorId := abi.ActorID(actorIdNum) + + windowProofs, faultySectors, err := ffi.GenerateWindowPoSt(actorId, ffi.NewSortedPrivateSectorInfo(privateSectorInfo), postRand) + if err != nil { + return nil, fmt.Errorf("failed to generate window post: %w", err) + } + if len(faultySectors) > 0 { + return nil, fmt.Errorf("post failed for sectors: %v", faultySectors) + } + if len(windowProofs) != 1 { + return nil, fmt.Errorf("expected 1 proof, got %d", len(windowProofs)) + } + if windowProofs[0].PoStProof != minerInfo.WindowPoStProofType { + return nil, fmt.Errorf("expected proof type %d, got %d", minerInfo.WindowPoStProofType, windowProofs[0].PoStProof) + } + proofBytes := windowProofs[0].ProofBytes + + info := 
proof.WindowPoStVerifyInfo{ + Randomness: postRand, + Proofs: []proof.PoStProof{{PoStProof: minerInfo.WindowPoStProofType, ProofBytes: proofBytes}}, + ChallengedSectors: []proof.SectorInfo{{SealProof: tm.proofType[sectorNumber], SectorNumber: sectorNumber, SealedCID: sealedCid}}, + Prover: actorId, + } + + verified, err := ffi.VerifyWindowPoSt(info) + if err != nil { + return nil, fmt.Errorf("failed to verify window post: %w", err) + } + if !verified { + return nil, fmt.Errorf("window post verification failed") + } + + return proofBytes, nil +} +func (tm *TestUnmanagedMiner) waitPreCommitSealRandomness(ctx context.Context, sectorNumber abi.SectorNumber) abi.ChainEpoch { + // We want to draw seal randomness from a tipset that has already achieved finality as PreCommits are expensive to re-generate. + // Check if we already have an epoch that is already final and wait for such an epoch if we don't have one. + head, err := tm.FullNode.ChainHead(ctx) + require.NoError(tm.t, err) + + var sealRandEpoch abi.ChainEpoch + if head.Height() > policy.SealRandomnessLookback { + sealRandEpoch = head.Height() - policy.SealRandomnessLookback + } else { + sealRandEpoch = policy.SealRandomnessLookback + tm.t.Logf("Miner %s waiting for at least epoch %d for seal randomness for sector %d (current epoch %d)...", tm.ActorAddr, sealRandEpoch+5, + sectorNumber, head.Height()) + tm.FullNode.WaitTillChain(ctx, HeightAtLeast(sealRandEpoch+5)) + } + + tm.t.Logf("Miner %s using seal randomness from epoch %d for head %d for sector %d", tm.ActorAddr, sealRandEpoch, head.Height(), sectorNumber) + + return sealRandEpoch +} + +// calculateNextPostEpoch calculates the first epoch of the deadline proving window +// that is desired for the given sector for the specified miner. +// This function returns the current epoch and the calculated proving epoch. 
+func (tm *TestUnmanagedMiner) calculateNextPostEpoch( + ctx context.Context, + sectorNumber abi.SectorNumber, +) (abi.ChainEpoch, abi.ChainEpoch, error) { + // Retrieve the current blockchain head + head, err := tm.FullNode.ChainHead(ctx) + if err != nil { + return 0, 0, fmt.Errorf("failed to get chain head: %w", err) + } + + // Fetch the sector partition for the given sector number + sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key()) + if err != nil { + return 0, 0, fmt.Errorf("failed to get sector partition: %w", err) + } + + tm.t.Logf("Miner %s: WindowPoST(%d): SectorPartition: %+v", tm.ActorAddr, sectorNumber, sp) + + // Obtain the proving deadline information for the miner + di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key()) + if err != nil { + return 0, 0, fmt.Errorf("failed to get proving deadline: %w", err) + } + + tm.t.Logf("Miner %s: WindowPoST(%d): ProvingDeadline: %+v", tm.ActorAddr, sectorNumber, di) + + // Calculate the start of the period, adjusting if the current deadline has passed + periodStart := di.PeriodStart + if di.PeriodStart < di.CurrentEpoch && sp.Deadline <= di.Index { + // If the deadline has passed in the current proving period, calculate for the next period + periodStart += di.WPoStProvingPeriod + } + + // Calculate the exact epoch when proving should occur + provingEpoch := periodStart + (di.WPoStProvingPeriod/abi.ChainEpoch(di.WPoStPeriodDeadlines))*abi.ChainEpoch(sp.Deadline) + + tm.t.Logf("Miner %s: WindowPoST(%d): next ProvingEpoch: %d", tm.ActorAddr, sectorNumber, provingEpoch) + + return di.CurrentEpoch, provingEpoch, nil +} + +func (tm *TestUnmanagedMiner) generatePreCommit( + ctx context.Context, + sectorNumber abi.SectorNumber, + sealRandEpoch abi.ChainEpoch, + proofType abi.RegisteredSealProof, + pieceInfo []abi.PieceInfo, +) { + req := require.New(tm.t) + tm.t.Logf("Miner %s: Generating proof type %d PreCommit for sector %d...", tm.ActorAddr, proofType, 
sectorNumber) + + head, err := tm.FullNode.ChainHead(ctx) + req.NoError(err, "Miner %s: Failed to get chain head for sector %d", tm.ActorAddr, sectorNumber) + + minerAddrBytes := new(bytes.Buffer) + req.NoError(tm.ActorAddr.MarshalCBOR(minerAddrBytes), "Miner %s: Failed to marshal address for sector %d", tm.ActorAddr, sectorNumber) + + rand, err := tm.FullNode.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_SealRandomness, sealRandEpoch, minerAddrBytes.Bytes(), head.Key()) + req.NoError(err, "Miner %s: Failed to get randomness for sector %d", tm.ActorAddr, sectorNumber) + sealTickets := abi.SealRandomness(rand) + + tm.t.Logf("Miner %s: Running proof type %d SealPreCommitPhase1 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + actorIdNum, err := address.IDFromAddress(tm.ActorAddr) + req.NoError(err, "Miner %s: Failed to get actor ID for sector %d", tm.ActorAddr, sectorNumber) + actorId := abi.ActorID(actorIdNum) + + pc1, err := ffi.SealPreCommitPhase1( + proofType, + tm.cacheDirPaths[sectorNumber], + tm.unsealedSectorPaths[sectorNumber], + tm.sealedSectorPaths[sectorNumber], + sectorNumber, + actorId, + sealTickets, + pieceInfo, + ) + req.NoError(err, "Miner %s: SealPreCommitPhase1 failed for sector %d", tm.ActorAddr, sectorNumber) + req.NotNil(pc1, "Miner %s: SealPreCommitPhase1 returned nil for sector %d", tm.ActorAddr, sectorNumber) + + tm.t.Logf("Miner %s: Running proof type %d SealPreCommitPhase2 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + sealedCid, unsealedCid, err := ffi.SealPreCommitPhase2( + pc1, + tm.cacheDirPaths[sectorNumber], + tm.sealedSectorPaths[sectorNumber], + ) + req.NoError(err, "Miner %s: SealPreCommitPhase2 failed for sector %d", tm.ActorAddr, sectorNumber) + + tm.t.Logf("Miner %s: Unsealed CID for sector %d: %s", tm.ActorAddr, sectorNumber, unsealedCid) + tm.t.Logf("Miner %s: Sealed CID for sector %d: %s", tm.ActorAddr, sectorNumber, sealedCid) + + tm.sealTickets[sectorNumber] = sealTickets + 
tm.sealedCids[sectorNumber] = sealedCid + tm.unsealedCids[sectorNumber] = unsealedCid +} + +func (tm *TestUnmanagedMiner) proveCommitWaitSeed(ctx context.Context, sectorNumber abi.SectorNumber) abi.InteractiveSealRandomness { + req := require.New(tm.t) + head, err := tm.FullNode.ChainHead(ctx) + req.NoError(err) + + tm.t.Logf("Miner %s: Fetching pre-commit info for sector %d...", tm.ActorAddr, sectorNumber) + preCommitInfo, err := tm.FullNode.StateSectorPreCommitInfo(ctx, tm.ActorAddr, sectorNumber, head.Key()) + req.NoError(err) + seedRandomnessHeight := preCommitInfo.PreCommitEpoch + policy.GetPreCommitChallengeDelay() + + tm.t.Logf("Miner %s: Waiting %d epochs for seed randomness at epoch %d (current epoch %d) for sector %d...", tm.ActorAddr, seedRandomnessHeight-head.Height(), seedRandomnessHeight, head.Height(), sectorNumber) + tm.FullNode.WaitTillChain(ctx, HeightAtLeast(seedRandomnessHeight+5)) + + minerAddrBytes := new(bytes.Buffer) + req.NoError(tm.ActorAddr.MarshalCBOR(minerAddrBytes)) + + head, err = tm.FullNode.ChainHead(ctx) + req.NoError(err) + + tm.t.Logf("Miner %s: Fetching seed randomness for sector %d...", tm.ActorAddr, sectorNumber) + rand, err := tm.FullNode.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, seedRandomnessHeight, minerAddrBytes.Bytes(), head.Key()) + req.NoError(err) + seedRandomness := abi.InteractiveSealRandomness(rand) + + tm.t.Logf("Miner %s: Obtained seed randomness for sector %d: %x", tm.ActorAddr, sectorNumber, seedRandomness) + return seedRandomness +} + +func (tm *TestUnmanagedMiner) generateProveCommit( + ctx context.Context, + sectorNumber abi.SectorNumber, + proofType abi.RegisteredSealProof, + seedRandomness abi.InteractiveSealRandomness, + pieces []abi.PieceInfo, +) []byte { + tm.t.Logf("Miner %s: Generating proof type %d Sector Proof for sector %d...", tm.ActorAddr, proofType, sectorNumber) + req := require.New(tm.t) + + actorIdNum, err := address.IDFromAddress(tm.ActorAddr) 
+ req.NoError(err) + actorId := abi.ActorID(actorIdNum) + + tm.t.Logf("Miner %s: Running proof type %d SealCommitPhase1 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + scp1, err := ffi.SealCommitPhase1( + proofType, + tm.sealedCids[sectorNumber], + tm.unsealedCids[sectorNumber], + tm.cacheDirPaths[sectorNumber], + tm.sealedSectorPaths[sectorNumber], + sectorNumber, + actorId, + tm.sealTickets[sectorNumber], + seedRandomness, + pieces, + ) + req.NoError(err) + + tm.t.Logf("Miner %s: Running proof type %d SealCommitPhase2 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + sectorProof, err := ffi.SealCommitPhase2(scp1, sectorNumber, actorId) + req.NoError(err) + + tm.t.Logf("Miner %s: Got proof type %d sector proof of length %d for sector %d", tm.ActorAddr, proofType, len(sectorProof), sectorNumber) + + return sectorProof +} + +func (tm *TestUnmanagedMiner) submitMessage( + ctx context.Context, + params cbg.CBORMarshaler, + value uint64, + method abi.MethodNum, +) (*api.MsgLookup, error) { + enc, aerr := actors.SerializeParams(params) + if aerr != nil { + return nil, aerr + } + + tm.t.Logf("Submitting message for miner %s with method number %d", tm.ActorAddr, method) + + m, err := tm.FullNode.MpoolPushMessage(ctx, &types.Message{ + To: tm.ActorAddr, + From: tm.OwnerKey.Address, + Value: types.FromFil(value), + Method: method, + Params: enc, + }, nil) + if err != nil { + return nil, err + } + + tm.t.Logf("Pushed message with CID: %s for miner %s", m.Cid(), tm.ActorAddr) + + msg, err := tm.FullNode.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true) + if err != nil { + return nil, err + } + + tm.t.Logf("Message with CID: %s has been confirmed on-chain for miner %s", m.Cid(), tm.ActorAddr) + + return msg, nil +} + +func requireTempFile(t *testing.T, fileContentsReader io.Reader, size uint64) *os.File { + // Create a temporary file + tempFile, err := os.CreateTemp(t.TempDir(), "") + require.NoError(t, err) + + // Copy contents from the reader 
to the temporary file + bytesCopied, err := io.Copy(tempFile, fileContentsReader) + require.NoError(t, err) + + // Ensure the expected size matches the copied size + require.EqualValues(t, size, bytesCopied) + + // Synchronize the file's content to disk + require.NoError(t, tempFile.Sync()) + + // Reset the file pointer to the beginning of the file + _, err = tempFile.Seek(0, io.SeekStart) + require.NoError(t, err) + + return tempFile +} diff --git a/itests/manual_onboarding_test.go b/itests/manual_onboarding_test.go new file mode 100644 index 00000000000..f10c8b7c171 --- /dev/null +++ b/itests/manual_onboarding_test.go @@ -0,0 +1,174 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/itests/kit" +) + +const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB + +// Manually onboard CC sectors, bypassing lotus-miner onboarding pathways +func TestManualSectorOnboarding(t *testing.T) { + req := require.New(t) + + for _, withMockProofs := range []bool{true, false} { + testName := "WithRealProofs" + if withMockProofs { + testName = "WithMockProofs" + } + t.Run(testName, func(t *testing.T) { + if testName == "WithRealProofs" { + kit.Expensive(t) + } + kit.QuietMiningLogs() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var ( + // need to pick a balance value so that the test is not racy on CI by running through its WindowPostDeadlines too fast + blocktime = 2 * time.Millisecond + client kit.TestFullNode + minerA kit.TestMiner // A is a standard genesis miner + ) + + // Setup and begin mining with a single miner (A) + // Miner A will only be a genesis Miner with power allocated in the genesis block and will not onboard any sectors from here on + kitOpts := []kit.EnsembleOpt{} + if withMockProofs { + kitOpts = append(kitOpts, kit.MockProofs()) + } + ens 
:= kit.NewEnsemble(t, kitOpts...). + FullNode(&client, kit.SectorSize(defaultSectorSize)). + // preseal more than the default number of sectors to ensure that the genesis miner has power + // because our unmanaged miners won't produce blocks so we may get null rounds + Miner(&minerA, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()). + Start(). + InterconnectAll() + blockMiners := ens.BeginMiningMustPost(blocktime) + req.Len(blockMiners, 1) + blockMiner := blockMiners[0] + + // Instantiate MinerB to manually handle sector onboarding and power acquisition through sector activation. + // Unlike other miners managed by the Lotus Miner storage infrastructure, MinerB operates independently, + // performing all related tasks manually. Managed by the TestKit, MinerB has the capability to utilize actual proofs + // for the processes of sector onboarding and activation. + nodeOpts := []kit.NodeOpt{kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey)} + minerB, ens := ens.UnmanagedMiner(&client, nodeOpts...) + // MinerC is similar to MinerB, but onboards pieces instead of a pure CC sector + minerC, ens := ens.UnmanagedMiner(&client, nodeOpts...) 
+ + ens.Start() + + build.Clock.Sleep(time.Second) + + t.Log("Checking initial power ...") + + // Miner A should have power as it has already onboarded sectors in the genesis block + head, err := client.ChainHead(ctx) + req.NoError(err) + p, err := client.StateMinerPower(ctx, minerA.ActorAddr, head.Key()) + req.NoError(err) + t.Logf("MinerA RBP: %v, QaP: %v", p.MinerPower.QualityAdjPower.String(), p.MinerPower.RawBytePower.String()) + + // Miner B should have no power as it has yet to onboard and activate any sectors + minerB.AssertNoPower(ctx) + + // Miner C should have no power as it has yet to onboard and activate any sectors + minerC.AssertNoPower(ctx) + + // ---- Miner B onboards a CC sector + var bSectorNum abi.SectorNumber + var bRespCh chan kit.WindowPostResp + var bWdPostCancelF context.CancelFunc + + if withMockProofs { + bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSectorWithMockProofs(ctx, kit.TestSpt) + } else { + bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSectorWithRealProofs(ctx, kit.TestSpt) + } + // Miner B should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it + minerB.AssertNoPower(ctx) + // Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector + blockMiner.WatchMinerForPost(minerB.ActorAddr) + + // --- Miner C onboards sector with data/pieces + var cSectorNum abi.SectorNumber + var cRespCh chan kit.WindowPostResp + + if withMockProofs { + cSectorNum, cRespCh, _ = minerC.OnboardSectorWithPiecesAndMockProofs(ctx, kit.TestSpt) + } else { + cSectorNum, cRespCh, _ = minerC.OnboardSectorWithPiecesAndRealProofs(ctx, kit.TestSpt) + } + // Miner C should still not have power as power can only be gained after sector is activated i.e. 
the first WindowPost is submitted for it + minerC.AssertNoPower(ctx) + // Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector + blockMiner.WatchMinerForPost(minerC.ActorAddr) + + // Wait till both miners' sectors have had their first post and are activated and check that this is reflected in miner power + waitTillActivatedAndAssertPower(ctx, t, minerB, bRespCh, bSectorNum, uint64(defaultSectorSize), withMockProofs) + waitTillActivatedAndAssertPower(ctx, t, minerC, cRespCh, cSectorNum, uint64(defaultSectorSize), withMockProofs) + + // Miner B has activated the CC sector -> upgrade it with snapdeals + // Note: We can't activate a sector with mock proofs as the WdPost is successfully disputed and so no point + // in snapping it as snapping is only for activated sectors + if !withMockProofs { + minerB.SnapDealWithRealProofs(ctx, kit.TestSpt, bSectorNum) + // cancel the WdPost for the CC sector as the corresponding CommR is no longer valid + bWdPostCancelF() + } + }) + } +} + +func waitTillActivatedAndAssertPower(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, respCh chan kit.WindowPostResp, sector abi.SectorNumber, + sectorSize uint64, withMockProofs bool) { + req := require.New(t) + // wait till sector is activated + select { + case resp := <-respCh: + req.NoError(resp.Error) + req.True(resp.Posted) + case <-ctx.Done(): + t.Fatal("timed out waiting for sector activation") + } + + // Fetch on-chain sector properties + head, err := miner.FullNode.ChainHead(ctx) + req.NoError(err) + + soi, err := miner.FullNode.StateSectorGetInfo(ctx, miner.ActorAddr, sector, head.Key()) + req.NoError(err) + t.Logf("Miner %s SectorOnChainInfo %d: %+v", miner.ActorAddr.String(), sector, soi) + + _ = miner.FullNode.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()+5)) + + t.Log("Checking power after PoSt ...") + + // The miner (B or C) should now have power + miner.AssertPower(ctx, sectorSize, sectorSize) 
+ + if withMockProofs { + // WindowPost Dispute should succeed as we are using mock proofs + err := miner.SubmitPostDispute(ctx, sector) + require.NoError(t, err) + } else { + // WindowPost Dispute should fail + assertDisputeFails(ctx, t, miner, sector) + } +} + +func assertDisputeFails(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, sector abi.SectorNumber) { + err := miner.SubmitPostDispute(ctx, sector) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to dispute valid post") + require.Contains(t, err.Error(), "(RetCode=16)") +} diff --git a/itests/path_type_filters_test.go b/itests/path_type_filters_test.go deleted file mode 100644 index a2e2049323b..00000000000 --- a/itests/path_type_filters_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package itests - -import ( - "context" - "strings" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/storage/sealer/sealtasks" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestPathTypeFilters(t *testing.T) { - kit.QuietMiningLogs() - - runTest := func(t *testing.T, name string, asserts func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func())) { - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _ = logging.SetLogLevel("storageminer", "INFO") - - var ( - client kit.TestFullNode - miner kit.TestMiner - wiw, wdw kit.TestWorker - ) - ens := kit.NewEnsemble(t, kit.LatestActorsAt(-1)). - FullNode(&client, kit.ThroughRPC()). - Miner(&miner, &client, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.PresealSectors(2), kit.NoStorage()). - Worker(&miner, &wiw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})). 
- Worker(&miner, &wdw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt})). - Start() - - ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond) - - asserts(t, ctx, &miner, func() { - dh := kit.NewDealHarness(t, &client, &miner, &miner) - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) - }) - }) - } - - runTest(t, "invalid-type-alert", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) { - slU := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanSeal = true - meta.AllowTypes = []string{"unsealed", "seeled"} - }) - - storlist, err := miner.StorageList(ctx) - require.NoError(t, err) - - require.Len(t, storlist, 2) // 1 path we've added + preseal - - si, err := miner.StorageInfo(ctx, slU) - require.NoError(t, err) - - // check that bad entries are filtered - require.Len(t, si.DenyTypes, 0) - require.Len(t, si.AllowTypes, 1) - require.Equal(t, "unsealed", si.AllowTypes[0]) - - as, err := miner.LogAlerts(ctx) - require.NoError(t, err) - - var found bool - for _, a := range as { - if a.Active && a.Type.System == "sector-index" && strings.HasPrefix(a.Type.Subsystem, "pathconf-") { - require.False(t, found) - require.Contains(t, string(a.LastActive.Message), "unknown sector file type 'seeled'") - found = true - } - } - require.True(t, found) - }) - - runTest(t, "seal-to-stor-unseal-allowdeny", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) { - // allow all types in the sealing path - sealScratch := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanSeal = true - }) - - // unsealed storage - unsStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanStore = true - meta.AllowTypes = []string{"unsealed"} - }) - - // other storage - sealStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanStore = true - meta.DenyTypes = []string{"unsealed"} - }) - 
- storlist, err := miner.StorageList(ctx) - require.NoError(t, err) - - require.Len(t, storlist, 4) // 3 paths we've added + preseal - - run() - - storlist, err = miner.StorageList(ctx) - require.NoError(t, err) - - require.Len(t, storlist[sealScratch], 0) - require.Len(t, storlist[unsStor], 1) - require.Len(t, storlist[sealStor], 1) - - require.Equal(t, storiface.FTUnsealed, storlist[unsStor][0].SectorFileType) - require.Equal(t, storiface.FTSealed|storiface.FTCache, storlist[sealStor][0].SectorFileType) - }) - - runTest(t, "sealstor-unseal-allowdeny", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) { - // unsealed storage - unsStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanStore = true - meta.CanSeal = true - meta.AllowTypes = []string{"unsealed"} - }) - - // other storage - sealStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanStore = true - meta.CanSeal = true - meta.DenyTypes = []string{"unsealed"} - }) - - storlist, err := miner.StorageList(ctx) - require.NoError(t, err) - - require.Len(t, storlist, 3) // 2 paths we've added + preseal - - run() - - storlist, err = miner.StorageList(ctx) - require.NoError(t, err) - - require.Len(t, storlist[unsStor], 1) - require.Len(t, storlist[sealStor], 1) - - require.Equal(t, storiface.FTUnsealed, storlist[unsStor][0].SectorFileType) - require.Equal(t, storiface.FTSealed|storiface.FTCache, storlist[sealStor][0].SectorFileType) - }) - - runTest(t, "seal-store-allseparate", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) { - // sealing stores - slU := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanSeal = true - meta.AllowTypes = []string{"unsealed"} - }) - slS := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanSeal = true - meta.AllowTypes = []string{"sealed"} - }) - slC := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - 
meta.CanSeal = true - meta.AllowTypes = []string{"cache"} - }) - - // storage stores - stU := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanStore = true - meta.AllowTypes = []string{"unsealed"} - }) - stS := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanStore = true - meta.AllowTypes = []string{"sealed"} - }) - stC := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.CanStore = true - meta.AllowTypes = []string{"cache"} - }) - - storlist, err := miner.StorageList(ctx) - require.NoError(t, err) - - require.Len(t, storlist, 7) // 6 paths we've added + preseal - - run() - - storlist, err = miner.StorageList(ctx) - require.NoError(t, err) - - require.Len(t, storlist[slU], 0) - require.Len(t, storlist[slS], 0) - require.Len(t, storlist[slC], 0) - - require.Len(t, storlist[stU], 1) - require.Len(t, storlist[stS], 1) - require.Len(t, storlist[stC], 1) - - require.Equal(t, storiface.FTUnsealed, storlist[stU][0].SectorFileType) - require.Equal(t, storiface.FTSealed, storlist[stS][0].SectorFileType) - require.Equal(t, storiface.FTCache, storlist[stC][0].SectorFileType) - }) -} diff --git a/itests/sector_finalize_early_test.go b/itests/sector_finalize_early_test.go deleted file mode 100644 index 1b8fcb346f9..00000000000 --- a/itests/sector_finalize_early_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// stm: #integration -package itests - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestDealsWithFinalizeEarly(t *testing.T) { - //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, - //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 - //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, 
@CHAIN_SYNCER_VALIDATE_TIPSET_001 - //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 - - //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 - //stm: @STORAGE_INFO_001 - if testing.Short() { - t.Skip("skipping test in short mode") - } - - kit.QuietMiningLogs() - - var blockTime = 50 * time.Millisecond - - // We use two miners so that in case the actively tested miner misses PoSt, we still have a blockchain - client, miner, poster, ens := kit.EnsembleOneTwo(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { sc.FinalizeEarly = true })) // no mock proofs. - ens.InterconnectAll().BeginMiningMustPost(blockTime, poster) - dh := kit.NewDealHarness(t, client, miner, miner) - - ctx := context.Background() - - miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.Weight = 1000000000 - meta.CanSeal = true - }) - miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) { - meta.Weight = 1000000000 - meta.CanStore = true - }) - - //stm: @STORAGE_LIST_001 - sl, err := miner.StorageList(ctx) - require.NoError(t, err) - for si, d := range sl { - i, err := miner.StorageInfo(ctx, si) - require.NoError(t, err) - - fmt.Printf("stor d:%d %+v\n", len(d), i) - } - - t.Run("single", func(t *testing.T) { - dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) - }) - - //stm: @STORAGE_LIST_001 - sl, err = miner.StorageList(ctx) - require.NoError(t, err) - for si, d := range sl { - i, err := miner.StorageInfo(ctx, si) - require.NoError(t, err) - - fmt.Printf("stor d:%d %+v\n", len(d), i) - } -} diff --git a/itests/supply_test.go b/itests/supply_test.go new file mode 100644 index 00000000000..5c603338d51 --- /dev/null +++ b/itests/supply_test.go @@ -0,0 +1,205 @@ +package itests + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" 
+ "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" +) + +func TestCirciulationSupplyUpgrade(t *testing.T) { + kit.QuietMiningLogs() + ctx := context.Background() + + // Choosing something divisible by epochs per day to remove error with simple deal duration + lockedClientBalance := big.Mul(abi.NewTokenAmount(11_520_000), abi.NewTokenAmount(1e18)) + lockedProviderBalance := big.Mul(abi.NewTokenAmount(1_000_000), abi.NewTokenAmount(1e18)) + var height0 abi.ChainEpoch + var height1 abi.ChainEpoch + // Lock collateral in market on nv22 network + fullNode0, blockMiner0, ens0 := kit.EnsembleMinimal(t, + kit.GenesisNetworkVersion(network.Version22), + kit.MockProofs(), + ) + { + + worker0 := blockMiner0.OwnerKey.Address + ens0.InterconnectAll().BeginMining(50 * time.Millisecond) + + // Lock collateral in market actor + wallet0, err := fullNode0.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // Add 1 FIL to cover provider collateral + c00, err := fullNode0.MarketAddBalance(ctx, wallet0, wallet0, lockedClientBalance) + require.NoError(t, err) + fullNode0.WaitMsg(ctx, c00) + c01, err := blockMiner0.FullNode.MarketAddBalance(ctx, worker0, blockMiner0.ActorAddr, lockedProviderBalance) + require.NoError(t, err) + fullNode0.WaitMsg(ctx, c01) + + psd0, err := fullNode0.MpoolPushMessage(ctx, + makePSDMessage( + ctx, + t, + blockMiner0.ActorAddr, + worker0, + wallet0, + lockedProviderBalance, + lockedClientBalance, + fullNode0.WalletSign, + ), + nil, + ) + 
require.NoError(t, err) + fullNode0.WaitMsg(ctx, psd0.Cid()) + head, err := fullNode0.ChainHead(ctx) + require.NoError(t, err) + height0 = head.Height() + } + + // Lock collateral in market on nv23 network + fullNode1, blockMiner1, ens1 := kit.EnsembleMinimal(t, + kit.GenesisNetworkVersion(network.Version23), + kit.MockProofs(), + ) + { + worker1 := blockMiner1.OwnerKey.Address + ens1.InterconnectAll().BeginMining(50 * time.Millisecond) + + // Lock collateral in market actor + wallet1, err := fullNode1.WalletDefaultAddress(ctx) + require.NoError(t, err) + c10, err := fullNode1.MarketAddBalance(ctx, wallet1, wallet1, lockedClientBalance) + require.NoError(t, err) + fullNode1.WaitMsg(ctx, c10) + c11, err := blockMiner1.FullNode.MarketAddBalance(ctx, worker1, blockMiner1.ActorAddr, lockedProviderBalance) + require.NoError(t, err) + fullNode1.WaitMsg(ctx, c11) + + psd1, err := fullNode1.MpoolPushMessage(ctx, + makePSDMessage( + ctx, + t, + blockMiner1.ActorAddr, + worker1, + wallet1, + lockedProviderBalance, + lockedClientBalance, + fullNode1.WalletSign, + ), + nil, + ) + require.NoError(t, err) + fullNode1.WaitMsg(ctx, psd1.Cid()) + head, err := fullNode1.ChainHead(ctx) + require.NoError(t, err) + height1 = head.Height() + } + + // Measure each circulating supply at the latest height where market balance was locked + // This allows us to normalize against fluctuations in circulating supply based on the underlying + // dynamics irrelevant to this change + + max := height0 + if height0 < height1 { + max = height1 + } + max = max + 1 // Measure supply at height after the deal locking funds was published + + // Let both chains catch up + fullNode0.WaitTillChain(ctx, func(ts *types.TipSet) bool { + return ts.Height() >= max + }) + fullNode1.WaitTillChain(ctx, func(ts *types.TipSet) bool { + return ts.Height() >= max + }) + + ts0, err := fullNode0.ChainGetTipSetByHeight(ctx, max, types.EmptyTSK) + require.NoError(t, err) + ts1, err := fullNode1.ChainGetTipSetByHeight(ctx, 
max, types.EmptyTSK) + require.NoError(t, err) + + nv22Supply, err := fullNode0.StateVMCirculatingSupplyInternal(ctx, ts0.Key()) + require.NoError(t, err, "Failed to fetch nv22 circulating supply") + nv23Supply, err := fullNode1.StateVMCirculatingSupplyInternal(ctx, ts1.Key()) + require.NoError(t, err, "Failed to fetch nv23 circulating supply") + + // Unfortunately there's still some non-determinism in supply dynamics so check for equality within a tolerance + tolerance := big.Mul(abi.NewTokenAmount(1000), abi.NewTokenAmount(1e18)) + totalLocked := big.Sum(lockedClientBalance, lockedProviderBalance) + diff := big.Sub( + big.Sum(totalLocked, nv23Supply.FilLocked), + nv22Supply.FilLocked, + ) + assert.Equal(t, -1, big.Cmp( + diff.Abs(), + tolerance), "Difference from expected locked supply between versions exceeds tolerance") +} + +// Message will be valid and lock funds but the data is fake so the deal will never be activated +func makePSDMessage( + ctx context.Context, + t *testing.T, + provider, + worker, + client address.Address, + providerLocked, + clientLocked abi.TokenAmount, + signFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error)) *types.Message { + + dummyCid, err := cid.Parse("baga6ea4seaqflae5c3k2odz4sqfufmrmoegplhk5jbq3co4fgmmy56yc2qfh4aq") + require.NoError(t, err) + + duration := 2880 * 200 // 200 days + ppe := big.Div(clientLocked, big.NewInt(2880*200)) + proposal := market.DealProposal{ + PieceCID: dummyCid, + PieceSize: abi.PaddedPieceSize(128), + VerifiedDeal: false, + Client: client, + Provider: provider, + ClientCollateral: big.Zero(), + ProviderCollateral: providerLocked, + StartEpoch: 10000, + EndEpoch: 10000 + abi.ChainEpoch(duration), + StoragePricePerEpoch: ppe, + } + buf := bytes.NewBuffer(nil) + require.NoError(t, proposal.MarshalCBOR(buf)) + sig, err := signFunc(ctx, client, buf.Bytes()) + require.NoError(t, err) + // Publish storage deal + params, err := 
actors.SerializeParams(&market.PublishStorageDealsParams{ + Deals: []market.ClientDealProposal{ + { + Proposal: proposal, + ClientSignature: *sig, + }, + }, + }) + require.NoError(t, err) + return &types.Message{ + To: builtin.StorageMarketActorAddr, + From: worker, + Value: types.NewInt(0), + Method: builtin.MethodsMarket.PublishStorageDeals, + Params: params, + } +} diff --git a/itests/worker_test.go b/itests/worker_test.go index 31ec40b5937..b3b8edd7632 100644 --- a/itests/worker_test.go +++ b/itests/worker_test.go @@ -585,82 +585,3 @@ waitForProof: require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params))) require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof) } - -func TestWorkerPledgeExpireCommit(t *testing.T) { - kit.QuietMiningLogs() - _ = logging.SetLogLevel("sectors", "debug") - - var tasksNoC2 = kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTDataCid, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, - sealtasks.TTUnseal, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed}) - - fc := config.DefaultStorageMiner().Fees - fc.MaxCommitGasFee = types.FIL(abi.NewTokenAmount(10000)) // 10000 attofil, way too low for anything to land - - ctx := context.Background() - client, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true), - kit.MutateSealingConfig(func(sc *config.SealingConfig) { - sc.AggregateCommits = true - }), - kit.ConstructorOpts( - node.Override(new(*sealing.Sealing), modules.SealingPipeline(fc)), - ), - kit.SplitstoreDisable(), // disable splitstore because messages which take a long time may get dropped - tasksNoC2) // no mock proofs - - ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond) - - e, err := worker.Enabled(ctx) - require.NoError(t, err) - require.True(t, e) - - dh := kit.NewDealHarness(t, client, miner, miner) - - startEpoch := 
abi.ChainEpoch(4 << 10) - - dh.StartRandomDeal(ctx, kit.MakeFullDealParams{ - Rseed: 7, - StartEpoch: startEpoch, - }) - - var sn abi.SectorNumber - - require.Eventually(t, func() bool { - s, err := miner.SectorsListNonGenesis(ctx) - require.NoError(t, err) - if len(s) == 0 { - return false - } - if len(s) > 1 { - t.Fatalf("expected 1 sector, got %d", len(s)) - } - sn = s[0] - return true - }, 30*time.Second, 1*time.Second) - - t.Log("sector", sn) - - t.Log("sector committing") - - // wait until after startEpoch - client.WaitTillChain(ctx, kit.HeightAtLeast(startEpoch+20)) - - t.Log("after start") - - sstate, err := miner.SectorsStatus(ctx, sn, false) - require.NoError(t, err) - require.Equal(t, api.SectorState(sealing.SubmitCommitAggregate), sstate.State) - - _, err = miner.SectorCommitFlush(ctx) - require.NoError(t, err) - - require.Eventually(t, func() bool { - sstate, err := miner.SectorsStatus(ctx, sn, false) - require.NoError(t, err) - - t.Logf("sector state: %s", sstate.State) - - return sstate.State == api.SectorState(sealing.Removed) - }, 30*time.Second, 1*time.Second) - - t.Log("sector removed") -} diff --git a/itests/worker_upgrade_test.go b/itests/worker_upgrade_test.go deleted file mode 100644 index b253a26a577..00000000000 --- a/itests/worker_upgrade_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package itests - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/node/config" - sealing "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/sealer/sealtasks" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestWorkerUpgradeAbortCleanup(t *testing.T) { - ctx := context.Background() - blockTime := 1 * time.Millisecond - kit.QuietMiningLogs() - - client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems(), 
kit.ThroughRPC(), kit.WithNoLocalSealing(true), - kit.NoStorage(), // no storage to have better control over path settings - kit.MutateSealingConfig(func(sc *config.SealingConfig) { sc.FinalizeEarly = true })) // no mock proofs - - var worker kit.TestWorker - ens.Worker(miner, &worker, kit.ThroughRPC(), kit.NoStorage(), // no storage to have better control over path settings - kit.WithTaskTypes([]sealtasks.TaskType{ - sealtasks.TTFetch, sealtasks.TTAddPiece, - sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, - sealtasks.TTReplicaUpdate, // only first update step, later steps will not run and we'll abort - }), - ) - - ens.Start().InterconnectAll().BeginMiningMustPost(blockTime) - - maddr, err := miner.ActorAddress(ctx) - if err != nil { - t.Fatal(err) - } - - // get storage paths - - // store-only path on the miner - miner.AddStorage(ctx, t, func(cfg *storiface.LocalStorageMeta) { - cfg.CanSeal = false - cfg.CanStore = true - }) - - mlocal, err := miner.StorageLocal(ctx) - require.NoError(t, err) - require.Len(t, mlocal, 2) // genesis and one local - - // we want a seal-only path on the worker disconnected from miner path - worker.AddStorage(ctx, t, func(cfg *storiface.LocalStorageMeta) { - cfg.CanSeal = true - cfg.CanStore = false - }) - - wpaths, err := worker.Paths(ctx) - require.NoError(t, err) - require.Len(t, wpaths, 1) - - // check sectors in paths - checkSectors := func(miners, workers storiface.SectorFileType) { - paths, err := miner.StorageList(ctx) - require.NoError(t, err) - require.Len(t, paths, 3) // genesis, miner, worker - - // first loop for debugging - for id, decls := range paths { - pinfo, err := miner.StorageInfo(ctx, id) - require.NoError(t, err) - - switch { - case id == wpaths[0].ID: // worker path - fmt.Println("Worker Decls ", len(decls), decls) - case !pinfo.CanStore && !pinfo.CanSeal: // genesis path - fmt.Println("Genesis Decls ", len(decls), 
decls) - default: // miner path - fmt.Println("Miner Decls ", len(decls), decls) - } - } - - for id, decls := range paths { - pinfo, err := miner.StorageInfo(ctx, id) - require.NoError(t, err) - - switch { - case id == wpaths[0].ID: // worker path - if workers != storiface.FTNone { - require.Len(t, decls, 1) - require.EqualValues(t, workers.Strings(), decls[0].SectorFileType.Strings()) - } else { - require.Len(t, decls, 0) - } - case !pinfo.CanStore && !pinfo.CanSeal: // genesis path - require.Len(t, decls, kit.DefaultPresealsPerBootstrapMiner) - default: // miner path - if miners != storiface.FTNone { - require.Len(t, decls, 1) - require.EqualValues(t, miners.Strings(), decls[0].SectorFileType.Strings()) - } else { - require.Len(t, decls, 0) - } - } - } - } - checkSectors(storiface.FTNone, storiface.FTNone) - - // get a sector for upgrading - miner.PledgeSectors(ctx, 1, 0, nil) - sl, err := miner.SectorsListNonGenesis(ctx) - require.NoError(t, err) - require.Len(t, sl, 1, "expected 1 sector") - - snum := sl[0] - - checkSectors(storiface.FTCache|storiface.FTSealed, storiface.FTNone) - - client.WaitForSectorActive(ctx, t, snum, maddr) - - // make available - err = miner.SectorMarkForUpgrade(ctx, snum, true) - require.NoError(t, err) - - // Start a deal - - dh := kit.NewDealHarness(t, client, miner, miner) - res, _ := client.CreateImportFile(ctx, 123, 0) - dp := dh.DefaultStartDealParams() - dp.Data.Root = res.Root - deal := dh.StartDeal(ctx, dp) - - // wait for the deal to be in a sector - dh.WaitDealSealed(ctx, deal, true, false, nil) - - // wait for replica update to happen - require.Eventually(t, func() bool { - sstate, err := miner.SectorsStatus(ctx, snum, false) - require.NoError(t, err) - return sstate.State == api.SectorState(sealing.ProveReplicaUpdate) - }, 10*time.Second, 50*time.Millisecond) - - // check that the sector was copied to the worker - checkSectors(storiface.FTCache|storiface.FTSealed, 
storiface.FTCache|storiface.FTSealed|storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache) - - // abort upgrade - err = miner.SectorAbortUpgrade(ctx, snum) - require.NoError(t, err) - - // the task is stuck in scheduler, so manually abort the task to get the sector fsm moving - si := miner.SchedInfo(ctx) - err = miner.SealingRemoveRequest(ctx, si.SchedInfo.Requests[0].SchedId) - require.NoError(t, err) - - var lastState api.SectorState - require.Eventually(t, func() bool { - sstate, err := miner.SectorsStatus(ctx, snum, false) - require.NoError(t, err) - lastState = sstate.State - - return sstate.State == api.SectorState(sealing.Proving) - }, 10*time.Second, 50*time.Millisecond, "last state was %s", &lastState) - - // check that nothing was left on the worker - checkSectors(storiface.FTCache|storiface.FTSealed, storiface.FTNone) -} diff --git a/lib/filler/filler.go b/lib/filler/filler.go new file mode 100644 index 00000000000..42bb4fa99ab --- /dev/null +++ b/lib/filler/filler.go @@ -0,0 +1,43 @@ +package filler + +import ( + "math/bits" + + "github.com/filecoin-project/go-state-types/abi" +) + +func FillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) { + // Convert to in-sector bytes for easier math: + // + // Sector size to user bytes ratio is constant, e.g. for 1024B we have 1016B + // of user-usable data. + // + // (1024/1016 = 128/127) + // + // Given that we can get sector size by simply adding 1/127 of the user + // bytes + // + // (we convert to sector bytes as they are nice round binary numbers) + + toFill := uint64(in + (in / 127)) + + // We need to fill the sector with pieces that are powers of 2. Conveniently + // computers store numbers in binary, which means we can look at 1s to get + // all the piece sizes we need to fill the sector. 
It also means that number + // of pieces is the number of 1s in the number of remaining bytes to fill + out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(toFill)) + for i := range out { + // Extract the next lowest non-zero bit + next := bits.TrailingZeros64(toFill) + psize := uint64(1) << next + // e.g: if the number is 0b010100, psize will be 0b000100 + + // set that bit to 0 by XORing it, so the next iteration looks at the + // next bit + toFill ^= psize + + // Add the piece size to the list of pieces we need to create + out[i] = abi.PaddedPieceSize(psize).Unpadded() + } + return out, nil +} diff --git a/storage/pipeline/utils_test.go b/lib/filler/filler_test.go similarity index 96% rename from storage/pipeline/utils_test.go rename to lib/filler/filler_test.go index fb9548a2832..a9b62da3066 100644 --- a/storage/pipeline/utils_test.go +++ b/lib/filler/filler_test.go @@ -1,4 +1,4 @@ -package sealing +package filler import ( "testing" @@ -9,7 +9,7 @@ import ( ) func testFill(t *testing.T, n abi.UnpaddedPieceSize, exp []abi.UnpaddedPieceSize) { - f, err := fillersFromRem(n) + f, err := FillersFromRem(n) assert.NoError(t, err) assert.Equal(t, exp, f) diff --git a/lib/harmony/harmonydb/harmonydb.go b/lib/harmony/harmonydb/harmonydb.go index 56b5acdfee2..28b071c2eba 100644 --- a/lib/harmony/harmonydb/harmonydb.go +++ b/lib/harmony/harmonydb/harmonydb.go @@ -25,7 +25,7 @@ import ( type ITestID string -// ItestNewID see ITestWithID doc +// ITestNewID see ITestWithID doc func ITestNewID() ITestID { return ITestID(strconv.Itoa(rand.Intn(99999))) } @@ -278,7 +278,10 @@ func (db *DB) upgrade() error { logger.Error("weird embed file read err") return err } - for _, s := range strings.Split(string(file), ";") { // Implement the changes. + + logger.Infow("Upgrading", "file", name, "size", len(file)) + + for _, s := range parseSQLStatements(string(file)) { // Implement the changes. 
if len(strings.TrimSpace(s)) == 0 { continue } @@ -299,3 +302,40 @@ func (db *DB) upgrade() error { } return nil } + +func parseSQLStatements(sqlContent string) []string { + var statements []string + var currentStatement strings.Builder + + lines := strings.Split(sqlContent, "\n") + var inFunction bool + + for _, line := range lines { + trimmedLine := strings.TrimSpace(line) + if trimmedLine == "" || strings.HasPrefix(trimmedLine, "--") { + // Skip empty lines and comments. + continue + } + + // Detect function blocks starting or ending. + if strings.Contains(trimmedLine, "$$") { + inFunction = !inFunction + } + + // Add the line to the current statement. + currentStatement.WriteString(line + "\n") + + // If we're not in a function and the line ends with a semicolon, or we just closed a function block. + if (!inFunction && strings.HasSuffix(trimmedLine, ";")) || (strings.Contains(trimmedLine, "$$") && !inFunction) { + statements = append(statements, currentStatement.String()) + currentStatement.Reset() + } + } + + // Add any remaining statement not followed by a semicolon (should not happen in well-formed SQL but just in case). + if currentStatement.Len() > 0 { + statements = append(statements, currentStatement.String()) + } + + return statements +} diff --git a/lib/harmony/harmonydb/sql/20230719-harmony.sql b/lib/harmony/harmonydb/sql/20230719-harmony.sql deleted file mode 100644 index e7b1795c579..00000000000 --- a/lib/harmony/harmonydb/sql/20230719-harmony.sql +++ /dev/null @@ -1,51 +0,0 @@ -/* For HarmonyTask base implementation. 
*/ - -CREATE TABLE harmony_machines ( - id SERIAL PRIMARY KEY NOT NULL, - last_contact TIMESTAMP NOT NULL DEFAULT current_timestamp, - host_and_port varchar(300) NOT NULL, - cpu INTEGER NOT NULL, - ram BIGINT NOT NULL, - gpu FLOAT NOT NULL -); - -CREATE TABLE harmony_task ( - id SERIAL PRIMARY KEY NOT NULL, - initiated_by INTEGER, - update_time TIMESTAMP NOT NULL DEFAULT current_timestamp, - posted_time TIMESTAMP NOT NULL, - owner_id INTEGER REFERENCES harmony_machines (id) ON DELETE SET NULL, - added_by INTEGER NOT NULL, - previous_task INTEGER, - name varchar(16) NOT NULL -); -COMMENT ON COLUMN harmony_task.initiated_by IS 'The task ID whose completion occasioned this task.'; -COMMENT ON COLUMN harmony_task.owner_id IS 'The foreign key to harmony_machines.'; -COMMENT ON COLUMN harmony_task.name IS 'The name of the task type.'; -COMMENT ON COLUMN harmony_task.owner_id IS 'may be null if between owners or not yet taken'; -COMMENT ON COLUMN harmony_task.update_time IS 'When it was last modified. 
not a heartbeat'; - -CREATE TABLE harmony_task_history ( - id SERIAL PRIMARY KEY NOT NULL, - task_id INTEGER NOT NULL, - name VARCHAR(16) NOT NULL, - posted TIMESTAMP NOT NULL, - work_start TIMESTAMP NOT NULL, - work_end TIMESTAMP NOT NULL, - result BOOLEAN NOT NULL, - err varchar -); -COMMENT ON COLUMN harmony_task_history.result IS 'Use to detemine if this was a successful run.'; - -CREATE TABLE harmony_task_follow ( - id SERIAL PRIMARY KEY NOT NULL, - owner_id INTEGER NOT NULL REFERENCES harmony_machines (id) ON DELETE CASCADE, - to_type VARCHAR(16) NOT NULL, - from_type VARCHAR(16) NOT NULL -); - -CREATE TABLE harmony_task_impl ( - id SERIAL PRIMARY KEY NOT NULL, - owner_id INTEGER NOT NULL REFERENCES harmony_machines (id) ON DELETE CASCADE, - name VARCHAR(16) NOT NULL -); \ No newline at end of file diff --git a/lib/harmony/harmonydb/sql/20230823-wdpost.sql b/lib/harmony/harmonydb/sql/20230823-wdpost.sql deleted file mode 100644 index c6f993d7664..00000000000 --- a/lib/harmony/harmonydb/sql/20230823-wdpost.sql +++ /dev/null @@ -1,48 +0,0 @@ -create table wdpost_partition_tasks -( - task_id bigint not null - constraint wdpost_partition_tasks_pk - primary key, - sp_id bigint not null, - proving_period_start bigint not null, - deadline_index bigint not null, - partition_index bigint not null, - constraint wdpost_partition_tasks_identity_key - unique (sp_id, proving_period_start, deadline_index, partition_index) -); - -comment on column wdpost_partition_tasks.task_id is 'harmonytask task ID'; -comment on column wdpost_partition_tasks.sp_id is 'storage provider ID'; -comment on column wdpost_partition_tasks.proving_period_start is 'proving period start'; -comment on column wdpost_partition_tasks.deadline_index is 'deadline index within the proving period'; -comment on column wdpost_partition_tasks.partition_index is 'partition index within the deadline'; - -create table wdpost_proofs -( - sp_id bigint not null, - proving_period_start bigint not null, - deadline 
bigint not null, - partition bigint not null, - submit_at_epoch bigint not null, - submit_by_epoch bigint not null, - proof_params bytea, - - submit_task_id bigint, - message_cid text, - - constraint wdpost_proofs_identity_key - unique (sp_id, proving_period_start, deadline, partition) -); - -create table wdpost_recovery_tasks -( - task_id bigint not null - constraint wdpost_recovery_tasks_pk - primary key, - sp_id bigint not null, - proving_period_start bigint not null, - deadline_index bigint not null, - partition_index bigint not null, - constraint wdpost_recovery_tasks_identity_key - unique (sp_id, proving_period_start, deadline_index, partition_index) -); \ No newline at end of file diff --git a/lib/harmony/harmonydb/sql/20230919-config.sql b/lib/harmony/harmonydb/sql/20230919-config.sql deleted file mode 100644 index 84699a0d546..00000000000 --- a/lib/harmony/harmonydb/sql/20230919-config.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE harmony_config ( - id SERIAL PRIMARY KEY NOT NULL, - title VARCHAR(300) UNIQUE NOT NULL, - config TEXT NOT NULL -); \ No newline at end of file diff --git a/lib/harmony/harmonydb/sql/20231103-chain_sends.sql b/lib/harmony/harmonydb/sql/20231103-chain_sends.sql deleted file mode 100644 index e70cf373811..00000000000 --- a/lib/harmony/harmonydb/sql/20231103-chain_sends.sql +++ /dev/null @@ -1,55 +0,0 @@ -create table message_sends -( - from_key text not null, - to_addr text not null, - send_reason text not null, - send_task_id bigint not null, - - unsigned_data bytea not null, - unsigned_cid text not null, - - nonce bigint, - signed_data bytea, - signed_json jsonb, - signed_cid text, - - send_time timestamp default null, - send_success boolean default null, - send_error text, - - constraint message_sends_pk - primary key (send_task_id, from_key) -); - -comment on column message_sends.from_key is 'text f[1/3/4]... address'; -comment on column message_sends.to_addr is 'text f[0/1/2/3/4]... 
address'; -comment on column message_sends.send_reason is 'optional description of send reason'; -comment on column message_sends.send_task_id is 'harmony task id of the send task'; - -comment on column message_sends.unsigned_data is 'unsigned message data'; -comment on column message_sends.unsigned_cid is 'unsigned message cid'; - -comment on column message_sends.nonce is 'assigned message nonce, set while the send task is executing'; -comment on column message_sends.signed_data is 'signed message data, set while the send task is executing'; -comment on column message_sends.signed_cid is 'signed message cid, set while the send task is executing'; - -comment on column message_sends.send_time is 'time when the send task was executed, set after pushing the message to the network'; -comment on column message_sends.send_success is 'whether this message was broadcasted to the network already, null if not yet attempted, true if successful, false if failed'; -comment on column message_sends.send_error is 'error message if send_success is false'; - -create unique index message_sends_success_index - on message_sends (from_key, nonce) - where send_success is not false; - -comment on index message_sends_success_index is -'message_sends_success_index enforces sender/nonce uniqueness, it is a conditional index that only indexes rows where send_success is not false. 
This allows us to have multiple rows with the same sender/nonce, as long as only one of them was successfully broadcasted (true) to the network or is in the process of being broadcasted (null).'; - -create table message_send_locks -( - from_key text not null, - task_id bigint not null, - claimed_at timestamp not null, - - constraint message_send_locks_pk - primary key (from_key) -); diff --git a/lib/harmony/harmonydb/sql/20231110-mining_tasks.sql b/lib/harmony/harmonydb/sql/20231110-mining_tasks.sql deleted file mode 100644 index 15b478f4dd1..00000000000 --- a/lib/harmony/harmonydb/sql/20231110-mining_tasks.sql +++ /dev/null @@ -1,39 +0,0 @@ -create table mining_tasks -( - task_id bigint not null - constraint mining_tasks_pk - primary key, - sp_id bigint not null, - epoch bigint not null, - base_compute_time timestamp not null, - - won bool not null default false, - mined_cid text, - mined_header jsonb, - mined_at timestamp, - - submitted_at timestamp, - - constraint mining_tasks_sp_epoch - unique (sp_id, epoch) -); - -create table mining_base_block -( - id bigserial not null - constraint mining_base_block_pk - primary key, - task_id bigint not null - constraint mining_base_block_mining_tasks_task_id_fk - references mining_tasks - on delete cascade, - sp_id bigint, - block_cid text not null, - - no_win bool not null default false, - - constraint mining_base_block_pk2 - unique (sp_id, task_id, block_cid) -); - -CREATE UNIQUE INDEX mining_base_block_cid_k ON mining_base_block (sp_id, block_cid) WHERE no_win = false; diff --git a/lib/harmony/harmonydb/sql/20231113-harmony_taskhistory_oops.sql b/lib/harmony/harmonydb/sql/20231113-harmony_taskhistory_oops.sql deleted file mode 100644 index 7a71d98aead..00000000000 --- a/lib/harmony/harmonydb/sql/20231113-harmony_taskhistory_oops.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE harmony_task_history ADD COLUMN completed_by_host_and_port varchar(300) NOT NULL; \ No newline at end of file diff --git 
a/lib/harmony/harmonydb/sql/20231120-testing1.sql b/lib/harmony/harmonydb/sql/20231120-testing1.sql deleted file mode 100644 index 71daaef6944..00000000000 --- a/lib/harmony/harmonydb/sql/20231120-testing1.sql +++ /dev/null @@ -1,8 +0,0 @@ -CREATE TABLE harmony_test ( - task_id bigint - constraint harmony_test_pk - primary key, - options text, - result text -); -ALTER TABLE wdpost_proofs ADD COLUMN test_task_id bigint; \ No newline at end of file diff --git a/lib/harmony/harmonydb/sql/20231217-sdr-pipeline.sql b/lib/harmony/harmonydb/sql/20231217-sdr-pipeline.sql deleted file mode 100644 index 31f10313968..00000000000 --- a/lib/harmony/harmonydb/sql/20231217-sdr-pipeline.sql +++ /dev/null @@ -1,135 +0,0 @@ --- NOTE: task_ids can be the same between different task types and between different sectors --- e.g. SN-supraseal doing 128 sdr/TreeC/TreeR with the same task_id - -create table sectors_sdr_pipeline ( - sp_id bigint not null, - sector_number bigint not null, - - -- at request time - create_time timestamp not null default current_timestamp, - reg_seal_proof int not null, - - -- sdr - ticket_epoch bigint, - ticket_value bytea, - - task_id_sdr bigint, - after_sdr bool not null default false, - - -- tree D - tree_d_cid text, -- commd from treeD compute, should match comm_d_cid - - task_id_tree_d bigint, - after_tree_d bool not null default false, - - -- tree C - task_id_tree_c bigint, - after_tree_c bool not null default false, - - -- tree R - tree_r_cid text, -- commr from treeR compute - - task_id_tree_r bigint, - after_tree_r bool not null default false, - - -- precommit message sending - precommit_msg_cid text, - - task_id_precommit_msg bigint, - after_precommit_msg bool not null default false, - - -- precommit message wait - seed_epoch bigint, - precommit_msg_tsk bytea, - after_precommit_msg_success bool not null default false, - - -- seed - seed_value bytea, - - -- Commit (PoRep snark) - task_id_porep bigint, - porep_proof bytea, - after_porep bool not null 
default false, - - -- Finalize (trim cache) - task_id_finalize bigint, - after_finalize bool not null default false, - - -- MoveStorage (move data to storage) - task_id_move_storage bigint, - after_move_storage bool not null default false, - - -- Commit message sending - commit_msg_cid text, - - task_id_commit_msg bigint, - after_commit_msg bool not null default false, - - -- Commit message wait - commit_msg_tsk bytea, - after_commit_msg_success bool not null default false, - - -- Failure handling - failed bool not null default false, - failed_at timestamp, - failed_reason varchar(20) not null default '', - failed_reason_msg text not null default '', - - -- foreign key - -- note: those foreign keys are a part of the retry mechanism. If a task - -- fails due to retry limit, it will drop the assigned task_id, and the - -- poller will reassign the task to a new node if it deems the task is - -- still valid to be retried. - foreign key (task_id_sdr) references harmony_task (id) on delete set null, - foreign key (task_id_tree_d) references harmony_task (id) on delete set null, - foreign key (task_id_tree_c) references harmony_task (id) on delete set null, - foreign key (task_id_tree_r) references harmony_task (id) on delete set null, - foreign key (task_id_precommit_msg) references harmony_task (id) on delete set null, - foreign key (task_id_porep) references harmony_task (id) on delete set null, - foreign key (task_id_finalize) references harmony_task (id) on delete set null, - foreign key (task_id_move_storage) references harmony_task (id) on delete set null, - foreign key (task_id_commit_msg) references harmony_task (id) on delete set null, - - -- constraints - primary key (sp_id, sector_number) -); - -create table sectors_sdr_initial_pieces ( - sp_id bigint not null, - sector_number bigint not null, - - piece_index bigint not null, - piece_cid text not null, - piece_size bigint not null, -- padded size - - -- data source - data_url text not null, - data_headers 
jsonb not null default '{}', - data_raw_size bigint not null, - data_delete_on_finalize bool not null, - - -- deal info - f05_publish_cid text, - f05_deal_id bigint, - f05_deal_proposal jsonb, - f05_deal_start_epoch bigint, - f05_deal_end_epoch bigint, - - -- ddo deal info - -- added in 20240402-sdr-pipeline-ddo-deal-info.sql - -- direct_start_epoch bigint, - -- direct_end_epoch bigint, - -- direct_piece_activation_manifest jsonb, - - -- foreign key - foreign key (sp_id, sector_number) references sectors_sdr_pipeline (sp_id, sector_number) on delete cascade, - - primary key (sp_id, sector_number, piece_index) -); - -comment on column sectors_sdr_initial_pieces.piece_size is 'padded size of the piece'; - -create table sectors_allocated_numbers ( - sp_id bigint not null primary key, - allocated jsonb not null -); diff --git a/lib/harmony/harmonydb/sql/20231225-message-waits.sql b/lib/harmony/harmonydb/sql/20231225-message-waits.sql deleted file mode 100644 index 4143f3a56e9..00000000000 --- a/lib/harmony/harmonydb/sql/20231225-message-waits.sql +++ /dev/null @@ -1,13 +0,0 @@ -create table message_waits ( - signed_message_cid text primary key, - waiter_machine_id int references harmony_machines (id) on delete set null, - - executed_tsk_cid text, - executed_tsk_epoch bigint, - executed_msg_cid text, - executed_msg_data jsonb, - - executed_rcpt_exitcode bigint, - executed_rcpt_return bytea, - executed_rcpt_gas_used bigint -) diff --git a/lib/harmony/harmonydb/sql/20240212-common-layers.sql b/lib/harmony/harmonydb/sql/20240212-common-layers.sql deleted file mode 100644 index cf72e175023..00000000000 --- a/lib/harmony/harmonydb/sql/20240212-common-layers.sql +++ /dev/null @@ -1,42 +0,0 @@ -INSERT INTO harmony_config (title, config) VALUES - ('post', ' - [Subsystems] - EnableWindowPost = true - EnableWinningPost = true - '), - - ('gui', ' - [Subsystems] - EnableWebGui = true - '), - - ('seal', ' - [Subsystems] - EnableSealSDR = true - EnableSealSDRTrees = true - 
EnableSendPrecommitMsg = true - EnablePoRepProof = true - EnableSendCommitMsg = true - EnableMoveStorage = true - '), - - ('seal-gpu', ' - [Subsystems] - EnableSealSDRTrees = true - EnableSendPrecommitMsg = true - '), - ('seal-snark', ' - [Subsystems] - EnablePoRepProof = true - EnableSendCommitMsg = true - '), - ('sdr', ' - [Subsystems] - EnableSealSDR = true - '), - - ('storage', ' - [Subsystems] - EnableMoveStorage = true - ') - ON CONFLICT (title) DO NOTHING; -- SPs may have these names defined already. \ No newline at end of file diff --git a/lib/harmony/harmonydb/sql/20240228-piece-park.sql b/lib/harmony/harmonydb/sql/20240228-piece-park.sql deleted file mode 100644 index 9ee6b447f39..00000000000 --- a/lib/harmony/harmonydb/sql/20240228-piece-park.sql +++ /dev/null @@ -1,35 +0,0 @@ -create table parked_pieces ( - id bigserial primary key, - created_at timestamp default current_timestamp, - - piece_cid text not null, - piece_padded_size bigint not null, - piece_raw_size bigint not null, - - complete boolean not null default false, - task_id bigint default null, - - cleanup_task_id bigint default null, - - foreign key (task_id) references harmony_task (id) on delete set null, - foreign key (cleanup_task_id) references harmony_task (id) on delete set null, - unique (piece_cid) -); - -/* - * This table is used to keep track of the references to the parked pieces - * so that we can delete them when they are no longer needed. - * - * All references into the parked_pieces table should be done through this table. - * - * data_url is optional for refs which also act as data sources. 
- */ -create table parked_piece_refs ( - ref_id bigserial primary key, - piece_id bigint not null, - - data_url text, - data_headers jsonb not null default '{}', - - foreign key (piece_id) references parked_pieces(id) on delete cascade -); diff --git a/lib/harmony/harmonydb/sql/20240317-web-summary-index.sql b/lib/harmony/harmonydb/sql/20240317-web-summary-index.sql deleted file mode 100644 index 28902448d05..00000000000 --- a/lib/harmony/harmonydb/sql/20240317-web-summary-index.sql +++ /dev/null @@ -1,7 +0,0 @@ -/* Used for webui clusterMachineSummary */ -CREATE INDEX harmony_task_history_work_index - ON harmony_task_history (completed_by_host_and_port ASC, name ASC, result ASC, work_end DESC); - -/* Used for webui actorSummary sp wins */ -CREATE INDEX mining_tasks_won_sp_id_base_compute_time_index - ON mining_tasks (won ASC, sp_id ASC, base_compute_time DESC); diff --git a/lib/harmony/harmonydb/sql/20240402-sdr-pipeline-ddo-deal-info.sql b/lib/harmony/harmonydb/sql/20240402-sdr-pipeline-ddo-deal-info.sql deleted file mode 100644 index 2230ef57492..00000000000 --- a/lib/harmony/harmonydb/sql/20240402-sdr-pipeline-ddo-deal-info.sql +++ /dev/null @@ -1,8 +0,0 @@ -ALTER TABLE sectors_sdr_initial_pieces - ADD COLUMN direct_start_epoch BIGINT; - -ALTER TABLE sectors_sdr_initial_pieces - ADD COLUMN direct_end_epoch BIGINT; - -ALTER TABLE sectors_sdr_initial_pieces - ADD COLUMN direct_piece_activation_manifest JSONB; diff --git a/lib/harmony/harmonydb/sql/20240404-machine_detail.sql b/lib/harmony/harmonydb/sql/20240404-machine_detail.sql deleted file mode 100644 index ae6de095124..00000000000 --- a/lib/harmony/harmonydb/sql/20240404-machine_detail.sql +++ /dev/null @@ -1,12 +0,0 @@ -CREATE TABLE harmony_machine_details ( - id SERIAL PRIMARY KEY, - tasks TEXT, - layers TEXT, - startup_time TIMESTAMP, - miners TEXT, - machine_id INTEGER, - FOREIGN KEY (machine_id) REFERENCES harmony_machines(id) ON DELETE CASCADE -); - -CREATE UNIQUE INDEX machine_details_machine_id ON 
harmony_machine_details(machine_id); - diff --git a/lib/harmony/harmonydb/sql/20240416-harmony_singleton_task.sql b/lib/harmony/harmonydb/sql/20240416-harmony_singleton_task.sql deleted file mode 100644 index d565cfa4702..00000000000 --- a/lib/harmony/harmonydb/sql/20240416-harmony_singleton_task.sql +++ /dev/null @@ -1,8 +0,0 @@ -create table harmony_task_singletons ( - task_name varchar(255) not null, - task_id bigint, - last_run_time timestamp, - - primary key (task_name), - foreign key (task_id) references harmony_task (id) on delete set null -); diff --git a/lib/harmony/harmonydb/sql/20240417-sector_index_gc.sql b/lib/harmony/harmonydb/sql/20240417-sector_index_gc.sql deleted file mode 100644 index e9771d9f31c..00000000000 --- a/lib/harmony/harmonydb/sql/20240417-sector_index_gc.sql +++ /dev/null @@ -1,13 +0,0 @@ -create table sector_path_url_liveness ( - storage_id text, - url text, - - last_checked timestamp not null, - last_live timestamp, - last_dead timestamp, - last_dead_reason text, - - primary key (storage_id, url), - - foreign key (storage_id) references storage_path (storage_id) on delete cascade -) diff --git a/lib/harmony/harmonydb/userfuncs.go b/lib/harmony/harmonydb/userfuncs.go index 1f39504b81e..da9d1a5980d 100644 --- a/lib/harmony/harmonydb/userfuncs.go +++ b/lib/harmony/harmonydb/userfuncs.go @@ -114,6 +114,10 @@ func (d dbscanRows) Columns() ([]string, error) { }), nil } +func (d dbscanRows) NextResultSet() bool { + return false +} + /* Select multiple rows into a slice using name matching Ex: diff --git a/lib/harmony/harmonytask/doc.go b/lib/harmony/harmonytask/doc.go deleted file mode 100644 index f9e5a989885..00000000000 --- a/lib/harmony/harmonytask/doc.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Package harmonytask implements a pure (no task logic), distributed -task manager. This clean interface allows a task implementer to completely -avoid being concerned with task scheduling and management. 
-It's based on the idea of tasks as small units of work broken from other -work by hardware, parallelizabilty, reliability, or any other reason. -Workers will be Greedy: vaccuuming up their favorite jobs from a list. -Once 1 task is accepted, harmonydb tries to get other task runner -machines to accept work (round robin) before trying again to accept. -* -Mental Model: - - Things that block tasks: - - task not registered for any running server - - max was specified and reached - - resource exhaustion - - CanAccept() interface (per-task implmentation) does not accept it. - Ways tasks start: - - DB Read every 3 seconds - - Task was added (to db) by this process - Ways tasks get added: - - Async Listener task (for chain, etc) - - Followers: Tasks get added because another task completed - When Follower collectors run: - - If both sides are process-local, then this process will pick it up. - - If properly registered already, the http endpoint will be tried to start it. - - Otherwise, at the listen interval during db scrape it will be found. - How duplicate tasks are avoided: - - that's up to the task definition, but probably a unique key - -* -To use: -1.Implement TaskInterface for a new task. -2. Have New() receive this & all other ACTIVE implementations. -* -* -As we are not expecting DBAs in this database, it's important to know -what grows uncontrolled. The only growing harmony_* table is -harmony_task_history (somewhat quickly). These will need a -clean-up for after the task data could never be acted upon. -but the design **requires** extraInfo tables to grow until the task's -info could not possibly be used by a following task, including slow -release rollout. This would normally be in the order of months old. -* -Other possible enhancements include more collaborative coordination -to assign a task to machines closer to the data. - -__Database_Behavior__ -harmony_task is the list of work that has not been completed. 
- - AddTaskFunc manages the additions, but is designed to have its - transactions failed-out on overlap with a similar task already written. - It's up to the TaskInterface implementer to discover this overlap via - some other table it uses (since overlap can mean very different things). - -harmony_task_history - - This holds transactions that completed or saw too many retries. It also - serves as input for subsequent (follower) tasks to kick off. This is not - done machine-internally because a follower may not be on the same machine - as the previous task. - -harmony_task_machines - - Managed by lib/harmony/resources, this is a reference to machines registered - via the resources. This registration does not obligate the machine to - anything, but serves as a discovery mechanism. Paths are hostnames + ports - which are presumed to support http, but this assumption is only used by - the task system. -*/ -package harmonytask diff --git a/lib/harmony/harmonytask/harmonytask.go b/lib/harmony/harmonytask/harmonytask.go deleted file mode 100644 index 0c66891d0c7..00000000000 --- a/lib/harmony/harmonytask/harmonytask.go +++ /dev/null @@ -1,387 +0,0 @@ -package harmonytask - -import ( - "context" - "fmt" - "strconv" - "sync/atomic" - "time" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/resources" -) - -// Consts (except for unit test) -var POLL_DURATION = time.Second * 3 // Poll for Work this frequently -var POLL_NEXT_DURATION = 100 * time.Millisecond // After scheduling a task, wait this long before scheduling another -var CLEANUP_FREQUENCY = 5 * time.Minute // Check for dead workers this often * everyone -var FOLLOW_FREQUENCY = 1 * time.Minute // Check for work to follow this often - -type TaskTypeDetails struct { - // Max returns how many tasks this machine can run of this type. - // Zero (default) or less means unrestricted. - Max int - - // Name is the task name to be added to the task list. 
- Name string - - // Peak costs to Do() the task. - Cost resources.Resources - - // Max Failure count before the job is dropped. - // 0 = retry forever - MaxFailures uint - - // Follow another task's completion via this task's creation. - // The function should populate extraInfo from data - // available from the previous task's tables, using the given TaskID. - // It should also return success if the trigger succeeded. - // NOTE: if refatoring tasks, see if your task is - // necessary. Ex: Is the sector state correct for your stage to run? - Follows map[string]func(TaskID, AddTaskFunc) (bool, error) - - // IAmBored is called (when populated) when there's capacity but no work. - // Tasks added will be proposed to CanAccept() on this machine. - // CanAccept() can read taskEngine's WorkOrigin string to learn about a task. - // Ex: make new CC sectors, clean-up, or retrying pipelines that failed in later states. - IAmBored func(AddTaskFunc) error -} - -// TaskInterface must be implemented in order to have a task used by harmonytask. -type TaskInterface interface { - // Do the task assigned. Call stillOwned before making single-writer-only - // changes to ensure the work has not been stolen. - // This is the ONLY function that should attempt to do the work, and must - // ONLY be called by harmonytask. - // Indicate if the task no-longer needs scheduling with done=true including - // cases where it's past the deadline. - Do(taskID TaskID, stillOwned func() bool) (done bool, err error) - - // CanAccept should return if the task can run on this machine. It should - // return null if the task type is not allowed on this machine. - // It should select the task it most wants to accomplish. - // It is also responsible for determining & reserving disk space (including scratch). - CanAccept([]TaskID, *TaskEngine) (*TaskID, error) - - // TypeDetails() returns static details about how this task behaves and - // how this machine will run it. Read once at the beginning. 
- TypeDetails() TaskTypeDetails - - // This listener will consume all external sources continuously for work. - // Do() may also be called from a backlog of work. This must not - // start doing the work (it still must be scheduled). - // Note: Task de-duplication should happen in ExtraInfoFunc by - // returning false, typically by determining from the tx that the work - // exists already. The easy way is to have a unique joint index - // across all fields that will be common. - // Adder should typically only add its own task type, but multiple - // is possible for when 1 trigger starts 2 things. - // Usage Example: - // func (b *BazType)Adder(addTask AddTaskFunc) { - // for { - // bazMaker := <- bazChannel - // addTask("baz", func(t harmonytask.TaskID, txn db.Transaction) (bool, error) { - // _, err := txn.Exec(`INSERT INTO bazInfoTable (taskID, qix, mot) - // VALUES ($1,$2,$3)`, id, bazMaker.qix, bazMaker.mot) - // if err != nil { - // scream(err) - // return false - // } - // return true - // }) - // } - // } - Adder(AddTaskFunc) -} - -// AddTaskFunc is responsible for adding a task's details "extra info" to the DB. -// It should return true if the task should be added, false if it was already there. -// This is typically accomplished with a "unique" index on your detals table that -// would cause the insert to fail. -// The error indicates that instead of a conflict (which we should ignore) that we -// actually have a serious problem that needs to be logged with context. 
-type AddTaskFunc func(extraInfo func(TaskID, *harmonydb.Tx) (shouldCommit bool, seriousError error)) - -type TaskEngine struct { - // Static After New() - ctx context.Context - handlers []*taskTypeHandler - db *harmonydb.DB - reg *resources.Reg - grace context.CancelFunc - taskMap map[string]*taskTypeHandler - ownerID int - follows map[string][]followStruct - hostAndPort string - - // synchronous to the single-threaded poller - lastFollowTime time.Time - lastCleanup atomic.Value - WorkOrigin string -} -type followStruct struct { - f func(TaskID, AddTaskFunc) (bool, error) - h *taskTypeHandler - name string -} - -type TaskID int - -// New creates all the task definitions. Note that TaskEngine -// knows nothing about the tasks themselves and serves to be a -// generic container for common work -func New( - db *harmonydb.DB, - impls []TaskInterface, - hostnameAndPort string) (*TaskEngine, error) { - - reg, err := resources.Register(db, hostnameAndPort) - if err != nil { - return nil, fmt.Errorf("cannot get resources: %w", err) - } - ctx, grace := context.WithCancel(context.Background()) - e := &TaskEngine{ - ctx: ctx, - grace: grace, - db: db, - reg: reg, - ownerID: reg.Resources.MachineID, // The current number representing "hostAndPort" - taskMap: make(map[string]*taskTypeHandler, len(impls)), - follows: make(map[string][]followStruct), - hostAndPort: hostnameAndPort, - } - e.lastCleanup.Store(time.Now()) - for _, c := range impls { - h := taskTypeHandler{ - TaskInterface: c, - TaskTypeDetails: c.TypeDetails(), - TaskEngine: e, - } - - if len(h.Name) > 16 { - return nil, fmt.Errorf("task name too long: %s, max 16 characters", h.Name) - } - - e.handlers = append(e.handlers, &h) - e.taskMap[h.TaskTypeDetails.Name] = &h - } - - // resurrect old work - { - var taskRet []struct { - ID int - Name string - } - - err := db.Select(e.ctx, &taskRet, `SELECT id, name from harmony_task WHERE owner_id=$1`, e.ownerID) - if err != nil { - return nil, err - } - for _, w := range 
taskRet { - // edge-case: if old assignments are not available tasks, unlock them. - h := e.taskMap[w.Name] - if h == nil { - _, err := db.Exec(e.ctx, `UPDATE harmony_task SET owner=NULL WHERE id=$1`, w.ID) - if err != nil { - log.Errorw("Cannot remove self from owner field", "error", err) - continue // not really fatal, but not great - } - } - if !h.considerWork(WorkSourceRecover, []TaskID{TaskID(w.ID)}) { - log.Errorw("Strange: Unable to accept previously owned task", "id", w.ID, "type", w.Name) - } - } - } - for _, h := range e.handlers { - go h.Adder(h.AddTask) - } - go e.poller() - - return e, nil -} - -// GracefullyTerminate hangs until all present tasks have completed. -// Call this to cleanly exit the process. As some processes are long-running, -// passing a deadline will ignore those still running (to be picked-up later). -func (e *TaskEngine) GracefullyTerminate() { - - // call the cancel func to avoid picking up any new tasks. Running tasks have context.Background() - // Call shutdown to stop posting heartbeat to DB. 
- e.grace() - e.reg.Shutdown() - - // If there are any Post tasks then wait till Timeout and check again - // When no Post tasks are active, break out of loop and call the shutdown function - for { - timeout := time.Millisecond - for _, h := range e.handlers { - if h.TaskTypeDetails.Name == "WinPost" && h.Count.Load() > 0 { - timeout = time.Second - log.Infof("node shutdown deferred for %f seconds", timeout.Seconds()) - continue - } - if h.TaskTypeDetails.Name == "WdPost" && h.Count.Load() > 0 { - timeout = time.Second * 3 - log.Infof("node shutdown deferred for %f seconds due to running WdPost task", timeout.Seconds()) - continue - } - - if h.TaskTypeDetails.Name == "WdPostSubmit" && h.Count.Load() > 0 { - timeout = time.Second - log.Infof("node shutdown deferred for %f seconds due to running WdPostSubmit task", timeout.Seconds()) - continue - } - - if h.TaskTypeDetails.Name == "WdPostRecover" && h.Count.Load() > 0 { - timeout = time.Second - log.Infof("node shutdown deferred for %f seconds due to running WdPostRecover task", timeout.Seconds()) - continue - } - - // Test tasks for itest - if h.TaskTypeDetails.Name == "ThingOne" && h.Count.Load() > 0 { - timeout = time.Second - log.Infof("node shutdown deferred for %f seconds due to running itest task", timeout.Seconds()) - continue - } - } - if timeout > time.Millisecond { - time.Sleep(timeout) - continue - } - break - } - - return -} - -func (e *TaskEngine) poller() { - nextWait := POLL_NEXT_DURATION - for { - select { - case <-time.After(nextWait): // Find work periodically - case <-e.ctx.Done(): ///////////////////// Graceful exit - return - } - nextWait = POLL_DURATION - - accepted := e.pollerTryAllWork() - if accepted { - nextWait = POLL_NEXT_DURATION - } - if time.Since(e.lastFollowTime) > FOLLOW_FREQUENCY { - e.followWorkInDB() - } - } -} - -// followWorkInDB implements "Follows" -func (e *TaskEngine) followWorkInDB() { - // Step 1: What are we following? 
- var lastFollowTime time.Time - lastFollowTime, e.lastFollowTime = e.lastFollowTime, time.Now() - - for fromName, srcs := range e.follows { - var cList []int // Which work is done (that we follow) since we last checked? - err := e.db.Select(e.ctx, &cList, `SELECT h.task_id FROM harmony_task_history - WHERE h.work_end>$1 AND h.name=$2`, lastFollowTime.UTC(), fromName) - if err != nil { - log.Error("Could not query DB: ", err) - return - } - for _, src := range srcs { - for _, workAlreadyDone := range cList { // Were any tasks made to follow these tasks? - var ct int - err := e.db.QueryRow(e.ctx, `SELECT COUNT(*) FROM harmony_task - WHERE name=$1 AND previous_task=$2`, src.h.Name, workAlreadyDone).Scan(&ct) - if err != nil { - log.Error("Could not query harmony_task: ", err) - return // not recoverable here - } - if ct > 0 { - continue - } - // we need to create this task - b, err := src.h.Follows[fromName](TaskID(workAlreadyDone), src.h.AddTask) - if err != nil { - log.Errorw("Could not follow: ", "error", err) - continue - } - if !b { - // But someone may have beaten us to it. 
- log.Debugf("Unable to add task %s following Task(%d, %s)", src.h.Name, workAlreadyDone, fromName) - } - } - } - } -} - -// pollerTryAllWork starts the next 1 task -func (e *TaskEngine) pollerTryAllWork() bool { - if time.Since(e.lastCleanup.Load().(time.Time)) > CLEANUP_FREQUENCY { - e.lastCleanup.Store(time.Now()) - resources.CleanupMachines(e.ctx, e.db) - } - for _, v := range e.handlers { - if err := v.AssertMachineHasCapacity(); err != nil { - log.Debugf("skipped scheduling %s type tasks on due to %s", v.Name, err.Error()) - continue - } - var unownedTasks []TaskID - err := e.db.Select(e.ctx, &unownedTasks, `SELECT id - FROM harmony_task - WHERE owner_id IS NULL AND name=$1 - ORDER BY update_time`, v.Name) - if err != nil { - log.Error("Unable to read work ", err) - continue - } - if len(unownedTasks) > 0 { - accepted := v.considerWork(WorkSourcePoller, unownedTasks) - if accepted { - return true // accept new work slowly and in priority order - } - log.Warn("Work not accepted for " + strconv.Itoa(len(unownedTasks)) + " " + v.Name + " task(s)") - } - } - // if no work was accepted, are we bored? Then find work in priority order. - for _, v := range e.handlers { - v := v - if v.AssertMachineHasCapacity() != nil { - continue - } - if v.TaskTypeDetails.IAmBored != nil { - var added []TaskID - err := v.TaskTypeDetails.IAmBored(func(extraInfo func(TaskID, *harmonydb.Tx) (shouldCommit bool, seriousError error)) { - v.AddTask(func(tID TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - b, err := extraInfo(tID, tx) - if err == nil { - added = append(added, tID) - } - return b, err - }) - }) - if err != nil { - log.Error("IAmBored failed: ", err) - continue - } - if added != nil { // tiny chance a fail could make these bogus, but considerWork should then fail. - v.considerWork(WorkSourceIAmBored, added) - } - } - } - - return false -} - -// ResourcesAvailable determines what resources are still unassigned. 
-func (e *TaskEngine) ResourcesAvailable() resources.Resources { - tmp := e.reg.Resources - for _, t := range e.handlers { - ct := t.Count.Load() - tmp.Cpu -= int(ct) * t.Cost.Cpu - tmp.Gpu -= float64(ct) * t.Cost.Gpu - tmp.Ram -= uint64(ct) * t.Cost.Ram - } - return tmp -} diff --git a/lib/harmony/harmonytask/singleton_task.go b/lib/harmony/harmonytask/singleton_task.go deleted file mode 100644 index 72003341026..00000000000 --- a/lib/harmony/harmonytask/singleton_task.go +++ /dev/null @@ -1,52 +0,0 @@ -package harmonytask - -import ( - "errors" - "time" - - "github.com/jackc/pgx/v5" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/passcall" -) - -func SingletonTaskAdder(minInterval time.Duration, task TaskInterface) func(AddTaskFunc) error { - return passcall.Every(minInterval, func(add AddTaskFunc) error { - taskName := task.TypeDetails().Name - - add(func(taskID TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { - var existingTaskID *int64 - var lastRunTime time.Time - - // Query to check the existing task entry - err = tx.QueryRow(`SELECT task_id, last_run_time FROM harmony_task_singletons WHERE task_name = $1`, taskName).Scan(&existingTaskID, &lastRunTime) - if err != nil { - if !errors.Is(err, pgx.ErrNoRows) { - return false, err // return error if query failed and it's not because of missing row - } - } - - now := time.Now().UTC() - // Determine if the task should run based on the absence of a record or outdated last_run_time - shouldRun := err == pgx.ErrNoRows || (existingTaskID == nil && lastRunTime.Add(minInterval).Before(now)) - if !shouldRun { - return false, nil - } - - // Conditionally insert or update the task entry - n, err := tx.Exec(` - INSERT INTO harmony_task_singletons (task_name, task_id, last_run_time) - VALUES ($1, $2, $3) - ON CONFLICT (task_name) DO UPDATE - SET task_id = COALESCE(harmony_task_singletons.task_id, $2), - last_run_time = $3 - WHERE harmony_task_singletons.task_id 
IS NULL - `, taskName, taskID, now) - if err != nil { - return false, err - } - return n > 0, nil - }) - return nil - }) -} diff --git a/lib/harmony/harmonytask/task_type_handler.go b/lib/harmony/harmonytask/task_type_handler.go deleted file mode 100644 index a8c6e58b8fc..00000000000 --- a/lib/harmony/harmonytask/task_type_handler.go +++ /dev/null @@ -1,295 +0,0 @@ -package harmonytask - -import ( - "context" - "errors" - "fmt" - "runtime" - "strconv" - "sync/atomic" - "time" - - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -var log = logging.Logger("harmonytask") - -type taskTypeHandler struct { - TaskInterface - TaskTypeDetails - TaskEngine *TaskEngine - Count atomic.Int32 -} - -func (h *taskTypeHandler) AddTask(extra func(TaskID, *harmonydb.Tx) (bool, error)) { - var tID TaskID - retryWait := time.Millisecond * 100 -retryAddTask: - _, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) { - // create taskID (from DB) - err := tx.QueryRow(`INSERT INTO harmony_task (name, added_by, posted_time) - VALUES ($1, $2, CURRENT_TIMESTAMP) RETURNING id`, h.Name, h.TaskEngine.ownerID).Scan(&tID) - if err != nil { - return false, fmt.Errorf("could not insert into harmonyTask: %w", err) - } - return extra(tID, tx) - }) - - if err != nil { - if harmonydb.IsErrUniqueContraint(err) { - log.Debugf("addtask(%s) saw unique constraint, so it's added already.", h.Name) - return - } - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryWait) - retryWait *= 2 - goto retryAddTask - } - log.Errorw("Could not add task. AddTasFunc failed", "error", err, "type", h.Name) - return - } -} - -const ( - WorkSourcePoller = "poller" - WorkSourceRecover = "recovered" - WorkSourceIAmBored = "bored" -) - -// considerWork is called to attempt to start work on a task-id of this task type. -// It presumes single-threaded calling, so there should not be a multi-threaded re-entry. 
-// The only caller should be the one work poller thread. This does spin off other threads, -// but those should not considerWork. Work completing may lower the resource numbers -// unexpectedly, but that will not invalidate work being already able to fit. -func (h *taskTypeHandler) considerWork(from string, ids []TaskID) (workAccepted bool) { -top: - if len(ids) == 0 { - return true // stop looking for takers - } - - // 1. Can we do any more of this task type? - // NOTE: 0 is the default value, so this way people don't need to worry about - // this setting unless they want to limit the number of tasks of this type. - if h.Max > 0 && int(h.Count.Load()) >= h.Max { - log.Debugw("did not accept task", "name", h.Name, "reason", "at max already") - return false - } - - // 2. Can we do any more work? From here onward, we presume the resource - // story will not change, so single-threaded calling is best. - err := h.AssertMachineHasCapacity() - if err != nil { - log.Debugw("did not accept task", "name", h.Name, "reason", "at capacity already: "+err.Error()) - return false - } - - h.TaskEngine.WorkOrigin = from - - // 3. What does the impl say? 
-canAcceptAgain: - tID, err := h.CanAccept(ids, h.TaskEngine) - - h.TaskEngine.WorkOrigin = "" - - if err != nil { - log.Error(err) - return false - } - if tID == nil { - log.Infow("did not accept task", "task_id", ids[0], "reason", "CanAccept() refused", "name", h.Name) - return false - } - - releaseStorage := func() { - } - if h.TaskTypeDetails.Cost.Storage != nil { - if err = h.TaskTypeDetails.Cost.Storage.Claim(int(*tID)); err != nil { - log.Infow("did not accept task", "task_id", strconv.Itoa(int(*tID)), "reason", "storage claim failed", "name", h.Name, "error", err) - - if len(ids) > 1 { - var tryAgain = make([]TaskID, 0, len(ids)-1) - for _, id := range ids { - if id != *tID { - tryAgain = append(tryAgain, id) - } - } - ids = tryAgain - goto canAcceptAgain - } - - return false - } - releaseStorage = func() { - if err := h.TaskTypeDetails.Cost.Storage.MarkComplete(int(*tID)); err != nil { - log.Errorw("Could not release storage", "error", err) - } - } - } - - // if recovering we don't need to try to claim anything because those tasks are already claimed by us - if from != WorkSourceRecover { - // 4. Can we claim the work for our hostname? 
- ct, err := h.TaskEngine.db.Exec(h.TaskEngine.ctx, "UPDATE harmony_task SET owner_id=$1 WHERE id=$2 AND owner_id IS NULL", h.TaskEngine.ownerID, *tID) - if err != nil { - log.Error(err) - - releaseStorage() - return false - } - if ct == 0 { - log.Infow("did not accept task", "task_id", strconv.Itoa(int(*tID)), "reason", "already Taken", "name", h.Name) - releaseStorage() - - var tryAgain = make([]TaskID, 0, len(ids)-1) - for _, id := range ids { - if id != *tID { - tryAgain = append(tryAgain, id) - } - } - ids = tryAgain - goto top - } - } - - h.Count.Add(1) - go func() { - log.Infow("Beginning work on Task", "id", *tID, "from", from, "name", h.Name) - - var done bool - var doErr error - workStart := time.Now() - - defer func() { - if r := recover(); r != nil { - stackSlice := make([]byte, 4092) - sz := runtime.Stack(stackSlice, false) - log.Error("Recovered from a serious error "+ - "while processing "+h.Name+" task "+strconv.Itoa(int(*tID))+": ", r, - " Stack: ", string(stackSlice[:sz])) - } - h.Count.Add(-1) - - releaseStorage() - h.recordCompletion(*tID, workStart, done, doErr) - if done { - for _, fs := range h.TaskEngine.follows[h.Name] { // Do we know of any follows for this task type? - if _, err := fs.f(*tID, fs.h.AddTask); err != nil { - log.Error("Could not follow", "error", err, "from", h.Name, "to", fs.name) - } - } - } - }() - - done, doErr = h.Do(*tID, func() bool { - var owner int - // Background here because we don't want GracefulRestart to block this save. 
- err := h.TaskEngine.db.QueryRow(context.Background(), - `SELECT owner_id FROM harmony_task WHERE id=$1`, *tID).Scan(&owner) - if err != nil { - log.Error("Cannot determine ownership: ", err) - return false - } - return owner == h.TaskEngine.ownerID - }) - if doErr != nil { - log.Errorw("Do() returned error", "type", h.Name, "id", strconv.Itoa(int(*tID)), "error", doErr) - } - }() - return true -} - -func (h *taskTypeHandler) recordCompletion(tID TaskID, workStart time.Time, done bool, doErr error) { - workEnd := time.Now() - retryWait := time.Millisecond * 100 -retryRecordCompletion: - cm, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) { - var postedTime time.Time - err := tx.QueryRow(`SELECT posted_time FROM harmony_task WHERE id=$1`, tID).Scan(&postedTime) - - if err != nil { - return false, fmt.Errorf("could not log completion: %w ", err) - } - result := "unspecified error" - if done { - _, err = tx.Exec("DELETE FROM harmony_task WHERE id=$1", tID) - if err != nil { - - return false, fmt.Errorf("could not log completion: %w", err) - } - result = "" - if doErr != nil { - result = "non-failing error: " + doErr.Error() - } - } else { - if doErr != nil { - result = "error: " + doErr.Error() - } - var deleteTask bool - if h.MaxFailures > 0 { - ct := uint(0) - err = tx.QueryRow(`SELECT count(*) FROM harmony_task_history - WHERE task_id=$1 AND result=FALSE`, tID).Scan(&ct) - if err != nil { - return false, fmt.Errorf("could not read task history: %w", err) - } - if ct >= h.MaxFailures { - deleteTask = true - } - } - if deleteTask { - _, err = tx.Exec("DELETE FROM harmony_task WHERE id=$1", tID) - if err != nil { - return false, fmt.Errorf("could not delete failed job: %w", err) - } - // Note: Extra Info is left laying around for later review & clean-up - } else { - _, err := tx.Exec(`UPDATE harmony_task SET owner_id=NULL WHERE id=$1`, tID) - if err != nil { - return false, fmt.Errorf("could not disown failed task: %v 
%v", tID, err) - } - } - } - _, err = tx.Exec(`INSERT INTO harmony_task_history - (task_id, name, posted, work_start, work_end, result, completed_by_host_and_port, err) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, tID, h.Name, postedTime.UTC(), workStart.UTC(), workEnd.UTC(), done, h.TaskEngine.hostAndPort, result) - if err != nil { - return false, fmt.Errorf("could not write history: %w", err) - } - return true, nil - }) - if err != nil { - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryWait) - retryWait *= 2 - goto retryRecordCompletion - } - log.Error("Could not record transaction: ", err) - return - } - if !cm { - log.Error("Committing the task records failed") - } -} - -func (h *taskTypeHandler) AssertMachineHasCapacity() error { - r := h.TaskEngine.ResourcesAvailable() - - if r.Cpu-h.Cost.Cpu < 0 { - return errors.New("Did not accept " + h.Name + " task: out of cpu") - } - if h.Cost.Ram > r.Ram { - return errors.New("Did not accept " + h.Name + " task: out of RAM") - } - if r.Gpu-h.Cost.Gpu < 0 { - return errors.New("Did not accept " + h.Name + " task: out of available GPU") - } - - if h.TaskTypeDetails.Cost.Storage != nil { - if !h.TaskTypeDetails.Cost.Storage.HasCapacity() { - return errors.New("Did not accept " + h.Name + " task: out of available Storage") - } - } - return nil -} diff --git a/lib/harmony/resources/getGPU.go b/lib/harmony/resources/getGPU.go deleted file mode 100644 index 62d5c091e11..00000000000 --- a/lib/harmony/resources/getGPU.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build !darwin -// +build !darwin - -package resources - -import ( - "os" - "strconv" - "strings" - - ffi "github.com/filecoin-project/filecoin-ffi" -) - -func getGPUDevices() float64 { // GPU boolean - if nstr := os.Getenv("HARMONY_OVERRIDE_GPUS"); nstr != "" { - n, err := strconv.ParseFloat(nstr, 64) - if err != nil { - logger.Errorf("parsing HARMONY_OVERRIDE_GPUS failed: %+v", err) - } else { - return n - } - } - - gpus, err := ffi.GetGPUDevices() - 
logger.Infow("GPUs", "list", gpus) - if err != nil { - logger.Errorf("getting gpu devices failed: %+v", err) - } - all := strings.ToLower(strings.Join(gpus, ",")) - if len(gpus) > 1 || strings.Contains(all, "ati") || strings.Contains(all, "nvidia") { - return float64(len(gpus)) - } - return 0 -} diff --git a/lib/harmony/resources/getGPU_darwin.go b/lib/harmony/resources/getGPU_darwin.go deleted file mode 100644 index a9c0a33cdab..00000000000 --- a/lib/harmony/resources/getGPU_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build darwin -// +build darwin - -package resources - -func getGPUDevices() float64 { - return 10000.0 // unserious value intended for non-production use. -} diff --git a/lib/harmony/resources/memsys.go b/lib/harmony/resources/memsys.go deleted file mode 100644 index 1a45b5b228a..00000000000 --- a/lib/harmony/resources/memsys.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build darwin || freebsd || openbsd || dragonfly || netbsd -// +build darwin freebsd openbsd dragonfly netbsd - -package resources - -import ( - "encoding/binary" - "syscall" -) - -func sysctlUint64(name string) (uint64, error) { - s, err := syscall.Sysctl(name) - if err != nil { - return 0, err - } - // hack because the string conversion above drops a \0 - b := []byte(s) - if len(b) < 8 { - b = append(b, 0) - } - return binary.LittleEndian.Uint64(b), nil -} diff --git a/lib/harmony/resources/miniopencl/cl.h b/lib/harmony/resources/miniopencl/cl.h deleted file mode 100644 index e90fb76925a..00000000000 --- a/lib/harmony/resources/miniopencl/cl.h +++ /dev/null @@ -1,17 +0,0 @@ - -#ifndef CL_H -#define CL_H - -#define CL_USE_DEPRECATED_OPENCL_1_1_APIS -#define CL_USE_DEPRECATED_OPENCL_1_2_APIS -#define CL_USE_DEPRECATED_OPENCL_2_0_APIS - -#define CL_TARGET_OPENCL_VERSION 300 - -#ifdef __APPLE__ -#include "OpenCL/opencl.h" -#else -#include "CL/opencl.h" -#endif - -#endif /* CL_H */ \ No newline at end of file diff --git a/lib/harmony/resources/miniopencl/mini_opencl.go 
b/lib/harmony/resources/miniopencl/mini_opencl.go deleted file mode 100644 index d2486a88f66..00000000000 --- a/lib/harmony/resources/miniopencl/mini_opencl.go +++ /dev/null @@ -1,93 +0,0 @@ -// Package cl was borrowed from the go-opencl library which is more complex and -// doesn't compile well for our needs. -package cl - -// #include "cl.h" -import "C" -import ( - "fmt" - "unsafe" -) - -const maxPlatforms = 32 - -type Platform struct { - id C.cl_platform_id -} - -// Obtain the list of platforms available. -func GetPlatforms() ([]*Platform, error) { - var platformIds [maxPlatforms]C.cl_platform_id - var nPlatforms C.cl_uint - err := C.clGetPlatformIDs(C.cl_uint(maxPlatforms), &platformIds[0], &nPlatforms) - if err == -1001 { // No platforms found - return nil, nil - } - if err != C.CL_SUCCESS { - return nil, toError(err) - } - platforms := make([]*Platform, nPlatforms) - for i := 0; i < int(nPlatforms); i++ { - platforms[i] = &Platform{id: platformIds[i]} - } - return platforms, nil -} - -const maxDeviceCount = 64 - -type DeviceType uint - -const ( - DeviceTypeAll DeviceType = C.CL_DEVICE_TYPE_ALL -) - -type Device struct { - id C.cl_device_id -} - -func (p *Platform) GetAllDevices() ([]*Device, error) { - var deviceIds [maxDeviceCount]C.cl_device_id - var numDevices C.cl_uint - var platformId C.cl_platform_id - if p != nil { - platformId = p.id - } - if err := C.clGetDeviceIDs(platformId, C.cl_device_type(DeviceTypeAll), C.cl_uint(maxDeviceCount), &deviceIds[0], &numDevices); err != C.CL_SUCCESS { - return nil, toError(err) - } - if numDevices > maxDeviceCount { - numDevices = maxDeviceCount - } - devices := make([]*Device, numDevices) - for i := 0; i < int(numDevices); i++ { - devices[i] = &Device{id: deviceIds[i]} - } - return devices, nil -} - -func toError(code C.cl_int) error { - return ErrOther(code) -} - -type ErrOther int - -func (e ErrOther) Error() string { - return fmt.Sprintf("OpenCL: error %d", int(e)) -} - -// Size of global device memory in bytes. 
-func (d *Device) GlobalMemSize() int64 { - val, _ := d.getInfoUlong(C.CL_DEVICE_GLOBAL_MEM_SIZE, true) - return val -} - -func (d *Device) getInfoUlong(param C.cl_device_info, panicOnError bool) (int64, error) { - var val C.cl_ulong - if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS { - if panicOnError { - panic("Should never fail") - } - return 0, toError(err) - } - return int64(val), nil -} diff --git a/lib/harmony/resources/resources.go b/lib/harmony/resources/resources.go deleted file mode 100644 index 33bc80d6fe7..00000000000 --- a/lib/harmony/resources/resources.go +++ /dev/null @@ -1,165 +0,0 @@ -package resources - -import ( - "bytes" - "context" - "os/exec" - "regexp" - "runtime" - "sync/atomic" - "time" - - "github.com/elastic/go-sysinfo" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/sys/unix" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -var LOOKS_DEAD_TIMEOUT = 10 * time.Minute // Time w/o minute heartbeats - -type Resources struct { - Cpu int - Gpu float64 - Ram uint64 - MachineID int - Storage -} - -// Optional Storage management. -type Storage interface { - HasCapacity() bool - - // This allows some other system to claim space for this task. - Claim(taskID int) error - - // This allows some other system to consider the task done. - // It's up to the caller to remove the data, if that applies. 
- MarkComplete(taskID int) error -} -type Reg struct { - Resources - shutdown atomic.Bool -} - -var logger = logging.Logger("harmonytask") - -var lotusRE = regexp.MustCompile("lotus-worker|lotus-harmony|yugabyted|yb-master|yb-tserver") - -func Register(db *harmonydb.DB, hostnameAndPort string) (*Reg, error) { - var reg Reg - var err error - reg.Resources, err = getResources() - if err != nil { - return nil, err - } - ctx := context.Background() - { // Learn our owner_id while updating harmony_machines - var ownerID *int - - // Upsert query with last_contact update, fetch the machine ID - // (note this isn't a simple insert .. on conflict because host_and_port isn't unique) - err := db.QueryRow(ctx, ` - WITH upsert AS ( - UPDATE harmony_machines - SET cpu = $2, ram = $3, gpu = $4, last_contact = CURRENT_TIMESTAMP - WHERE host_and_port = $1 - RETURNING id - ), - inserted AS ( - INSERT INTO harmony_machines (host_and_port, cpu, ram, gpu, last_contact) - SELECT $1, $2, $3, $4, CURRENT_TIMESTAMP - WHERE NOT EXISTS (SELECT id FROM upsert) - RETURNING id - ) - SELECT id FROM upsert - UNION ALL - SELECT id FROM inserted; - `, hostnameAndPort, reg.Cpu, reg.Ram, reg.Gpu).Scan(&ownerID) - if err != nil { - return nil, xerrors.Errorf("inserting machine entry: %w", err) - } - if ownerID == nil { - return nil, xerrors.Errorf("no owner id") - } - - reg.MachineID = *ownerID - - cleaned := CleanupMachines(context.Background(), db) - logger.Infow("Cleaned up machines", "count", cleaned) - } - go func() { - for { - time.Sleep(time.Minute) - if reg.shutdown.Load() { - return - } - _, err := db.Exec(ctx, `UPDATE harmony_machines SET last_contact=CURRENT_TIMESTAMP where id=$1`, reg.MachineID) - if err != nil { - logger.Error("Cannot keepalive ", err) - } - } - }() - - return ®, nil -} - -func CleanupMachines(ctx context.Context, db *harmonydb.DB) int { - ct, err := db.Exec(ctx, - `DELETE FROM harmony_machines WHERE last_contact < CURRENT_TIMESTAMP - INTERVAL '1 MILLISECOND' * $1 `, - 
LOOKS_DEAD_TIMEOUT.Milliseconds()) // ms enables unit testing to change timeout. - if err != nil { - logger.Warn("unable to delete old machines: ", err) - } - return ct -} - -func (res *Reg) Shutdown() { - res.shutdown.Store(true) -} - -func getResources() (res Resources, err error) { - b, err := exec.Command(`ps`, `-ef`).CombinedOutput() - if err != nil { - logger.Warn("Could not safety check for 2+ processes: ", err) - } else { - found := 0 - for _, b := range bytes.Split(b, []byte("\n")) { - if lotusRE.Match(b) { - found++ - } - } - if found > 1 { - logger.Warn("curio's defaults are for running alone. Use task maximums or CGroups.") - } - } - - h, err := sysinfo.Host() - if err != nil { - return Resources{}, err - } - - mem, err := h.Memory() - if err != nil { - return Resources{}, err - } - - res = Resources{ - Cpu: runtime.NumCPU(), - Ram: mem.Available, - Gpu: getGPUDevices(), - } - - return res, nil -} - -func DiskFree(path string) (uint64, error) { - s := unix.Statfs_t{} - err := unix.Statfs(path, &s) - if err != nil { - return 0, err - } - - return s.Bfree * uint64(s.Bsize), nil -} diff --git a/lib/harmony/taskhelp/common.go b/lib/harmony/taskhelp/common.go deleted file mode 100644 index eaeb4a1bf7b..00000000000 --- a/lib/harmony/taskhelp/common.go +++ /dev/null @@ -1,19 +0,0 @@ -package taskhelp - -// SubsetIf returns a subset of the slice for which the predicate is true. -// It does not allocate memory, but rearranges the list in place. -// A non-zero list input will always return a non-zero list. -// The return value is the subset and a boolean indicating whether the subset was sliced. 
-func SliceIfFound[T any](slice []T, f func(T) bool) ([]T, bool) { - ct := 0 - for i, v := range slice { - if f(v) { - slice[ct], slice[i] = slice[i], slice[ct] - ct++ - } - } - if ct == 0 { - return slice, false - } - return slice[:ct], true -} diff --git a/lib/passcall/every.go b/lib/passcall/every.go deleted file mode 100644 index f39543063dd..00000000000 --- a/lib/passcall/every.go +++ /dev/null @@ -1,28 +0,0 @@ -package passcall - -import ( - "sync" - "time" -) - -// Every is a helper function that will call the provided callback -// function at most once every `passEvery` duration. If the function is called -// more frequently than that, it will return nil and not call the callback. -func Every[P, R any](passInterval time.Duration, cb func(P) R) func(P) R { - var lastCall time.Time - var lk sync.Mutex - - return func(param P) R { - lk.Lock() - defer lk.Unlock() - - if time.Since(lastCall) < passInterval { - return *new(R) - } - - defer func() { - lastCall = time.Now() - }() - return cb(param) - } -} diff --git a/lib/promise/promise.go b/lib/promise/promise.go deleted file mode 100644 index 02e917ca121..00000000000 --- a/lib/promise/promise.go +++ /dev/null @@ -1,53 +0,0 @@ -package promise - -import ( - "context" - "sync" -) - -type Promise[T any] struct { - val T - done chan struct{} - mu sync.Mutex -} - -func (p *Promise[T]) Set(val T) { - p.mu.Lock() - defer p.mu.Unlock() - - // Set value - p.val = val - - // Initialize the done channel if it hasn't been initialized - if p.done == nil { - p.done = make(chan struct{}) - } - - // Signal that the value is set - close(p.done) -} - -func (p *Promise[T]) Val(ctx context.Context) T { - p.mu.Lock() - // Initialize the done channel if it hasn't been initialized - if p.done == nil { - p.done = make(chan struct{}) - } - p.mu.Unlock() - - select { - case <-ctx.Done(): - return *new(T) - case <-p.done: - p.mu.Lock() - val := p.val - p.mu.Unlock() - return val - } -} - -func (p *Promise[T]) IsSet() bool { - p.mu.Lock() 
- defer p.mu.Unlock() - return p.done != nil -} diff --git a/lib/promise/promise_test.go b/lib/promise/promise_test.go deleted file mode 100644 index c2e9b656e95..00000000000 --- a/lib/promise/promise_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package promise - -import ( - "context" - "sync" - "testing" - "time" -) - -func TestPromiseSet(t *testing.T) { - p := &Promise[int]{} - - p.Set(42) - if p.val != 42 { - t.Fatalf("expected 42, got %v", p.val) - } -} - -func TestPromiseVal(t *testing.T) { - p := &Promise[int]{} - - p.Set(42) - - ctx := context.Background() - val := p.Val(ctx) - - if val != 42 { - t.Fatalf("expected 42, got %v", val) - } -} - -func TestPromiseValWaitsForSet(t *testing.T) { - p := &Promise[int]{} - var val int - - var wg sync.WaitGroup - wg.Add(1) - - go func() { - defer wg.Done() - ctx := context.Background() - val = p.Val(ctx) - }() - - time.Sleep(100 * time.Millisecond) // Give some time for the above goroutine to execute - p.Set(42) - wg.Wait() - - if val != 42 { - t.Fatalf("expected 42, got %v", val) - } -} - -func TestPromiseValContextCancel(t *testing.T) { - p := &Promise[int]{} - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel the context - - val := p.Val(ctx) - - var zeroValue int - if val != zeroValue { - t.Fatalf("expected zero-value, got %v", val) - } -} diff --git a/lib/unixfs/filestore.go b/lib/unixfs/filestore.go deleted file mode 100644 index f50e933b68f..00000000000 --- a/lib/unixfs/filestore.go +++ /dev/null @@ -1,159 +0,0 @@ -package unixfs - -import ( - "context" - "fmt" - "io" - "os" - - "github.com/ipfs/boxo/blockservice" - bstore "github.com/ipfs/boxo/blockstore" - chunker "github.com/ipfs/boxo/chunker" - offline "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/boxo/files" - "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/boxo/ipld/unixfs/importer/balanced" - ihelper "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-cidutil" - 
ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/stores" - - "github.com/filecoin-project/lotus/build" -) - -var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31) - -func CidBuilder() (cid.Builder, error) { - prefix, err := merkledag.PrefixForCidVersion(1) - if err != nil { - return nil, fmt.Errorf("failed to initialize UnixFS CID Builder: %w", err) - } - prefix.MhType = DefaultHashFunction - b := cidutil.InlineBuilder{ - Builder: prefix, - Limit: 126, - } - return b, nil -} - -// CreateFilestore takes a standard file whose path is src, forms a UnixFS DAG, and -// writes a CARv2 file with positional mapping (backed by the go-filestore library). -func CreateFilestore(ctx context.Context, srcPath string, dstPath string) (cid.Cid, error) { - // This method uses a two-phase approach with a staging CAR blockstore and - // a final CAR blockstore. - // - // This is necessary because of https://github.com/ipld/go-car/issues/196 - // - // TODO: do we need to chunk twice? Isn't the first output already in the - // right order? Can't we just copy the CAR file and replace the header? - - src, err := os.Open(srcPath) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to open input file: %w", err) - } - defer src.Close() //nolint:errcheck - - stat, err := src.Stat() - if err != nil { - return cid.Undef, xerrors.Errorf("failed to stat file :%w", err) - } - - file, err := files.NewReaderPathFile(srcPath, src, stat) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create reader path file: %w", err) - } - - f, err := os.CreateTemp("", "") - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create temp file: %w", err) - } - _ = f.Close() // close; we only want the path. - - tmp := f.Name() - defer os.Remove(tmp) //nolint:errcheck - - // Step 1. Compute the UnixFS DAG and write it to a CARv2 file to get - // the root CID of the DAG. 
- fstore, err := stores.ReadWriteFilestore(tmp) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create temporary filestore: %w", err) - } - - finalRoot1, err := Build(ctx, file, fstore, true) - if err != nil { - _ = fstore.Close() - return cid.Undef, xerrors.Errorf("failed to import file to store to compute root: %w", err) - } - - if err := fstore.Close(); err != nil { - return cid.Undef, xerrors.Errorf("failed to finalize car filestore: %w", err) - } - - // Step 2. We now have the root of the UnixFS DAG, and we can write the - // final CAR for real under `dst`. - bs, err := stores.ReadWriteFilestore(dstPath, finalRoot1) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create a carv2 read/write filestore: %w", err) - } - - // rewind file to the beginning. - if _, err := src.Seek(0, 0); err != nil { - return cid.Undef, xerrors.Errorf("failed to rewind file: %w", err) - } - - finalRoot2, err := Build(ctx, file, bs, true) - if err != nil { - _ = bs.Close() - return cid.Undef, xerrors.Errorf("failed to create UnixFS DAG with carv2 blockstore: %w", err) - } - - if err := bs.Close(); err != nil { - return cid.Undef, xerrors.Errorf("failed to finalize car blockstore: %w", err) - } - - if finalRoot1 != finalRoot2 { - return cid.Undef, xerrors.New("roots do not match") - } - - return finalRoot1, nil -} - -// Build builds a UnixFS DAG out of the supplied reader, -// and imports the DAG into the supplied service. 
-func Build(ctx context.Context, reader io.Reader, into bstore.Blockstore, filestore bool) (cid.Cid, error) { - b, err := CidBuilder() - if err != nil { - return cid.Undef, err - } - - bsvc := blockservice.New(into, offline.Exchange(into)) - dags := merkledag.NewDAGService(bsvc) - bufdag := ipld.NewBufferedDAG(ctx, dags) - - params := ihelper.DagBuilderParams{ - Maxlinks: build.UnixfsLinksPerLevel, - RawLeaves: true, - CidBuilder: b, - Dagserv: bufdag, - NoCopy: filestore, - } - - db, err := params.New(chunker.NewSizeSplitter(reader, int64(build.UnixfsChunkSize))) - if err != nil { - return cid.Undef, err - } - nd, err := balanced.Layout(db) - if err != nil { - return cid.Undef, err - } - - if err := bufdag.Commit(); err != nil { - return cid.Undef, err - } - - return nd.Cid(), nil -} diff --git a/lib/unixfs/filestore_test.go b/lib/unixfs/filestore_test.go deleted file mode 100644 index 868698bced3..00000000000 --- a/lib/unixfs/filestore_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// stm: #unit -package unixfs - -import ( - "bytes" - "context" - "io" - "os" - "strings" - "testing" - - "github.com/ipfs/boxo/blockservice" - offline "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/boxo/files" - "github.com/ipfs/boxo/ipld/merkledag" - unixfile "github.com/ipfs/boxo/ipld/unixfs/file" - "github.com/ipfs/go-cid" - carv2 "github.com/ipld/go-car/v2" - "github.com/ipld/go-car/v2/blockstore" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/stores" -) - -// This test uses a full "dense" CARv2, and not a filestore (positional mapping). -func TestRoundtripUnixFS_Dense(t *testing.T) { - // stm: @CLIENT_DATA_IMPORT_002 - ctx := context.Background() - - inputPath, inputContents := genInputFile(t) - defer os.Remove(inputPath) //nolint:errcheck - - carv2File := newTmpFile(t) - defer os.Remove(carv2File) //nolint:errcheck - - // import a file to a Unixfs DAG using a CARv2 read/write blockstore. 
- bs, err := blockstore.OpenReadWrite(carv2File, nil, - carv2.ZeroLengthSectionAsEOF(true), - blockstore.UseWholeCIDs(true)) - require.NoError(t, err) - - root, err := Build(ctx, bytes.NewBuffer(inputContents), bs, false) - require.NoError(t, err) - require.NotEqual(t, cid.Undef, root) - require.NoError(t, bs.Finalize()) - - // reconstruct the file. - readOnly, err := blockstore.OpenReadOnly(carv2File, - carv2.ZeroLengthSectionAsEOF(true), - blockstore.UseWholeCIDs(true)) - require.NoError(t, err) - defer readOnly.Close() //nolint:errcheck - - dags := merkledag.NewDAGService(blockservice.New(readOnly, offline.Exchange(readOnly))) - - nd, err := dags.Get(ctx, root) - require.NoError(t, err) - - file, err := unixfile.NewUnixfsFile(ctx, dags, nd) - require.NoError(t, err) - - tmpOutput := newTmpFile(t) - defer os.Remove(tmpOutput) //nolint:errcheck - require.NoError(t, files.WriteTo(file, tmpOutput)) - - // ensure contents of the initial input file and the output file are identical. - fo, err := os.Open(tmpOutput) - require.NoError(t, err) - bz2, err := io.ReadAll(fo) - require.NoError(t, err) - require.NoError(t, fo.Close()) - require.Equal(t, inputContents, bz2) -} - -func TestRoundtripUnixFS_Filestore(t *testing.T) { - // stm: @CLIENT_DATA_IMPORT_001 - ctx := context.Background() - - inputPath, inputContents := genInputFile(t) - defer os.Remove(inputPath) //nolint:errcheck - - dst := newTmpFile(t) - defer os.Remove(dst) //nolint:errcheck - - root, err := CreateFilestore(ctx, inputPath, dst) - require.NoError(t, err) - require.NotEqual(t, cid.Undef, root) - - // convert the CARv2 to a normal file again and ensure the contents match - fs, err := stores.ReadOnlyFilestore(dst) - require.NoError(t, err) - defer fs.Close() //nolint:errcheck - - dags := merkledag.NewDAGService(blockservice.New(fs, offline.Exchange(fs))) - - nd, err := dags.Get(ctx, root) - require.NoError(t, err) - - file, err := unixfile.NewUnixfsFile(ctx, dags, nd) - require.NoError(t, err) - - 
tmpOutput := newTmpFile(t) - defer os.Remove(tmpOutput) //nolint:errcheck - require.NoError(t, files.WriteTo(file, tmpOutput)) - - // ensure contents of the initial input file and the output file are identical. - fo, err := os.Open(tmpOutput) - require.NoError(t, err) - bz2, err := io.ReadAll(fo) - require.NoError(t, err) - require.NoError(t, fo.Close()) - require.Equal(t, inputContents, bz2) -} - -// creates a new tempdir each time, guaranteeing uniqueness -func newTmpFile(t *testing.T) string { - return t.TempDir() + string(os.PathSeparator) + "tmp" -} - -func genInputFile(t *testing.T) (filepath string, contents []byte) { - s := strings.Repeat("abcde", 100) - tmp, err := os.CreateTemp("", "") - require.NoError(t, err) - _, err = io.Copy(tmp, strings.NewReader(s)) - require.NoError(t, err) - require.NoError(t, tmp.Close()) - return tmp.Name(), []byte(s) -} diff --git a/markets/dagstore/blockstore.go b/markets/dagstore/blockstore.go deleted file mode 100644 index 0ba68f549f9..00000000000 --- a/markets/dagstore/blockstore.go +++ /dev/null @@ -1,34 +0,0 @@ -package dagstore - -import ( - "context" - "io" - - bstore "github.com/ipfs/boxo/blockstore" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/dagstore" -) - -// Blockstore promotes a dagstore.ReadBlockstore to a full closeable Blockstore, -// stubbing out the write methods with erroring implementations. 
-type Blockstore struct { - dagstore.ReadBlockstore - io.Closer -} - -var _ bstore.Blockstore = (*Blockstore)(nil) - -func (b *Blockstore) DeleteBlock(context.Context, cid.Cid) error { - return xerrors.Errorf("DeleteBlock called but not implemented") -} - -func (b *Blockstore) Put(context.Context, blocks.Block) error { - return xerrors.Errorf("Put called but not implemented") -} - -func (b *Blockstore) PutMany(context.Context, []blocks.Block) error { - return xerrors.Errorf("PutMany called but not implemented") -} diff --git a/markets/dagstore/fixtures/sample-rw-bs-v2.car b/markets/dagstore/fixtures/sample-rw-bs-v2.car deleted file mode 100644 index 9f7b56df358..00000000000 Binary files a/markets/dagstore/fixtures/sample-rw-bs-v2.car and /dev/null differ diff --git a/markets/dagstore/miner_api.go b/markets/dagstore/miner_api.go deleted file mode 100644 index 773654af8ed..00000000000 --- a/markets/dagstore/miner_api.go +++ /dev/null @@ -1,205 +0,0 @@ -package dagstore - -import ( - "context" - "fmt" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/dagstore/throttle" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-state-types/abi" -) - -//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_lotus_accessor.go -package=mock_dagstore . 
MinerAPI - -type MinerAPI interface { - FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) - GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) - IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) - Start(ctx context.Context) error -} - -type SectorAccessor interface { - retrievalmarket.SectorAccessor - - UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) -} - -type minerAPI struct { - pieceStore piecestore.PieceStore - sa SectorAccessor - throttle throttle.Throttler - unsealThrottle throttle.Throttler - readyMgr *shared.ReadyManager -} - -var _ MinerAPI = (*minerAPI)(nil) - -func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int, unsealConcurrency int) MinerAPI { - var unsealThrottle throttle.Throttler - if unsealConcurrency == 0 { - unsealThrottle = throttle.Noop() - } else { - unsealThrottle = throttle.Fixed(unsealConcurrency) - } - return &minerAPI{ - pieceStore: store, - sa: sa, - throttle: throttle.Fixed(concurrency), - unsealThrottle: unsealThrottle, - readyMgr: shared.NewReadyManager(), - } -} - -func (m *minerAPI) Start(_ context.Context) error { - return m.readyMgr.FireReady(nil) -} - -func (m *minerAPI) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) { - err := m.readyMgr.AwaitReady() - if err != nil { - return false, xerrors.Errorf("failed while waiting for accessor to start: %w", err) - } - - var pieceInfo piecestore.PieceInfo - err = m.throttle.Do(ctx, func(ctx context.Context) (err error) { - pieceInfo, err = m.pieceStore.GetPieceInfo(pieceCid) - return err - }) - - if err != nil { - return false, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err) - } - - if len(pieceInfo.Deals) == 0 { - return false, xerrors.Errorf("no storage deals found for piece %s", pieceCid) - } - - // check if we have an unsealed deal for the given 
piece in any of the unsealed sectors. - for _, deal := range pieceInfo.Deals { - deal := deal - - var isUnsealed bool - // Throttle this path to avoid flooding the storage subsystem. - err := m.throttle.Do(ctx, func(ctx context.Context) (err error) { - isUnsealed, err = m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) - if err != nil { - return fmt.Errorf("failed to check if sector %d for deal %d was unsealed: %w", deal.SectorID, deal.DealID, err) - } - return nil - }) - - if err != nil { - log.Warnf("failed to check/retrieve unsealed sector: %s", err) - continue // move on to the next match. - } - - if isUnsealed { - return true, nil - } - } - - // we don't have an unsealed sector containing the piece - return false, nil -} - -func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) { - err := m.readyMgr.AwaitReady() - if err != nil { - return nil, err - } - - // Throttle this path to avoid flooding the storage subsystem. - var pieceInfo piecestore.PieceInfo - err = m.throttle.Do(ctx, func(ctx context.Context) (err error) { - pieceInfo, err = m.pieceStore.GetPieceInfo(pieceCid) - return err - }) - - if err != nil { - return nil, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err) - } - - if len(pieceInfo.Deals) == 0 { - return nil, xerrors.Errorf("no storage deals found for piece %s", pieceCid) - } - - // prefer an unsealed sector containing the piece if one exists - for _, deal := range pieceInfo.Deals { - deal := deal - - // Throttle this path to avoid flooding the storage subsystem. 
- var reader mount.Reader - err := m.throttle.Do(ctx, func(ctx context.Context) (err error) { - isUnsealed, err := m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) - if err != nil { - return fmt.Errorf("failed to check if sector %d for deal %d was unsealed: %w", deal.SectorID, deal.DealID, err) - } - if !isUnsealed { - return nil - } - // Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing. - reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) - return err - }) - - if err != nil { - log.Warnf("failed to check/retrieve unsealed sector: %s", err) - continue // move on to the next match. - } - - if reader != nil { - // we were able to obtain a reader for an already unsealed piece - return reader, nil - } - } - - lastErr := xerrors.New("no sectors found to unseal from") - - // if there is no unsealed sector containing the piece, just read the piece from the first sector we are able to unseal. - for _, deal := range pieceInfo.Deals { - // Note that if the deal data is not already unsealed, unsealing may - // block for a long time with the current PoRep - var reader mount.Reader - deal := deal - err := m.throttle.Do(ctx, func(ctx context.Context) (err error) { - // Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing. 
- reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) - return err - }) - - if err != nil { - lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err) - log.Warn(lastErr.Error()) - continue - } - - // Successfully fetched the deal data so return a reader over the data - return reader, nil - } - - return nil, lastErr -} - -func (m *minerAPI) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) { - err := m.readyMgr.AwaitReady() - if err != nil { - return 0, err - } - - pieceInfo, err := m.pieceStore.GetPieceInfo(pieceCid) - if err != nil { - return 0, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err) - } - - if len(pieceInfo.Deals) == 0 { - return 0, xerrors.Errorf("no storage deals found for piece %s", pieceCid) - } - - return uint64(pieceInfo.Deals[0].Length), nil -} diff --git a/markets/dagstore/miner_api_test.go b/markets/dagstore/miner_api_test.go deleted file mode 100644 index d13b098fc7f..00000000000 --- a/markets/dagstore/miner_api_test.go +++ /dev/null @@ -1,252 +0,0 @@ -// stm: #unit -package dagstore - -import ( - "bytes" - "context" - "io" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/piecestore" - piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-state-types/abi" - paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" -) - -const unsealedSectorID = abi.SectorNumber(1) -const sealedSectorID = abi.SectorNumber(2) - -func 
TestLotusAccessorFetchUnsealedPiece(t *testing.T) { - ctx := context.Background() - - cid1, err := cid.Parse("bafkqaaa") - require.NoError(t, err) - - unsealedSectorData := "unsealed" - sealedSectorData := "sealed" - mockData := map[abi.SectorNumber]string{ - unsealedSectorID: unsealedSectorData, - sealedSectorID: sealedSectorData, - } - - testCases := []struct { - name string - deals []abi.SectorNumber - fetchedData string - isUnsealed bool - - expectErr bool - }{{ - // Expect error if there is no deal info for piece CID - name: "no deals", - expectErr: true, - }, { - // Expect the API to always fetch the unsealed deal (because it's - // cheaper than fetching the sealed deal) - name: "prefer unsealed deal", - deals: []abi.SectorNumber{unsealedSectorID, sealedSectorID}, - fetchedData: unsealedSectorData, - isUnsealed: true, - }, { - // Expect the API to unseal the data if there are no unsealed deals - name: "unseal if necessary", - deals: []abi.SectorNumber{sealedSectorID}, - fetchedData: sealedSectorData, - isUnsealed: false, - }} - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - ps := getPieceStore(t) - rpn := &mockRPN{ - sectors: mockData, - } - api := NewMinerAPI(ps, rpn, 100, 5) - require.NoError(t, api.Start(ctx)) - - // Add deals to piece store - for _, sectorID := range tc.deals { - dealInfo := piecestore.DealInfo{ - SectorID: sectorID, - } - err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo) - require.NoError(t, err) - } - - // Fetch the piece - //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001 - r, err := api.FetchUnsealedPiece(ctx, cid1) - if tc.expectErr { - require.Error(t, err) - return - } - - // Check that the returned reader is for the correct piece - require.NoError(t, err) - bz, err := io.ReadAll(r) - require.NoError(t, err) - - require.Equal(t, tc.fetchedData, string(bz)) - - //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001 - uns, err := api.IsUnsealed(ctx, cid1) - require.NoError(t, err) - require.Equal(t, 
tc.isUnsealed, uns) - }) - } -} - -func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) { - ctx := context.Background() - cid1, err := cid.Parse("bafkqaaa") - require.NoError(t, err) - - ps := getPieceStore(t) - rpn := &mockRPN{} - api := NewMinerAPI(ps, rpn, 100, 5) - require.NoError(t, api.Start(ctx)) - - // Add a deal with data Length 10 - dealInfo := piecestore.DealInfo{ - Length: 10, - } - err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo) - require.NoError(t, err) - - // Check that the data length is correct - //stm: @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001 - l, err := api.GetUnpaddedCARSize(ctx, cid1) - require.NoError(t, err) - require.EqualValues(t, 10, l) -} - -func TestThrottle(t *testing.T) { - ctx := context.Background() - cid1, err := cid.Parse("bafkqaaa") - require.NoError(t, err) - - ps := getPieceStore(t) - rpn := &mockRPN{ - sectors: map[abi.SectorNumber]string{ - unsealedSectorID: "foo", - }, - } - api := NewMinerAPI(ps, rpn, 3, 5) - require.NoError(t, api.Start(ctx)) - - // Add a deal with data Length 10 - dealInfo := piecestore.DealInfo{ - SectorID: unsealedSectorID, - Length: 10, - } - err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo) - require.NoError(t, err) - - // hold the lock to block. - rpn.lk.Lock() - - // fetch the piece concurrently. - errgrp, ctx := errgroup.WithContext(context.Background()) - for i := 0; i < 10; i++ { - errgrp.Go(func() error { - //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001 - r, err := api.FetchUnsealedPiece(ctx, cid1) - if err == nil { - _ = r.Close() - } - return err - }) - } - - time.Sleep(500 * time.Millisecond) - require.EqualValues(t, 3, atomic.LoadInt32(&rpn.calls)) // throttled - - // allow to proceed. - rpn.lk.Unlock() - - // allow all to finish. 
- err = errgrp.Wait() - require.NoError(t, err) - - require.EqualValues(t, 10, atomic.LoadInt32(&rpn.calls)) // throttled - -} - -func getPieceStore(t *testing.T) piecestore.PieceStore { - ps, err := piecestoreimpl.NewPieceStore(ds_sync.MutexWrap(ds.NewMapDatastore())) - require.NoError(t, err) - - ch := make(chan struct{}, 1) - ps.OnReady(func(_ error) { - ch <- struct{}{} - }) - - err = ps.Start(context.Background()) - require.NoError(t, err) - <-ch - return ps -} - -type mockRPN struct { - calls int32 // guarded by atomic - lk sync.RWMutex // lock to simulate blocks. - sectors map[abi.SectorNumber]string -} - -func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { - return m.UnsealSectorAt(ctx, sectorID, offset, length) -} - -func (m *mockRPN) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { - atomic.AddInt32(&m.calls, 1) - m.lk.RLock() - defer m.lk.RUnlock() - - data, ok := m.sectors[sectorID] - if !ok { - panic("sector not found") - } - return struct { - io.ReadCloser - io.ReaderAt - io.Seeker - }{ - ReadCloser: io.NopCloser(bytes.NewBuffer([]byte(data[:]))), - }, nil -} - -func (m *mockRPN) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - return sectorID == unsealedSectorID, nil -} - -func (m *mockRPN) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - panic("implement me") -} - -func (m *mockRPN) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { - panic("implement me") -} - -func (m *mockRPN) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paychtypes.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) 
(abi.TokenAmount, error) { - panic("implement me") -} - -func (m *mockRPN) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) { - panic("implement me") -} - -var _ retrievalmarket.RetrievalProviderNode = (*mockRPN)(nil) diff --git a/markets/dagstore/mocks/mock_lotus_accessor.go b/markets/dagstore/mocks/mock_lotus_accessor.go deleted file mode 100644 index 3910512cf2e..00000000000 --- a/markets/dagstore/mocks/mock_lotus_accessor.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/filecoin-project/lotus/markets/dagstore (interfaces: MinerAPI) - -// Package mock_dagstore is a generated GoMock package. -package mock_dagstore - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - cid "github.com/ipfs/go-cid" - - mount "github.com/filecoin-project/dagstore/mount" -) - -// MockMinerAPI is a mock of MinerAPI interface. -type MockMinerAPI struct { - ctrl *gomock.Controller - recorder *MockMinerAPIMockRecorder -} - -// MockMinerAPIMockRecorder is the mock recorder for MockMinerAPI. -type MockMinerAPIMockRecorder struct { - mock *MockMinerAPI -} - -// NewMockMinerAPI creates a new mock instance. -func NewMockMinerAPI(ctrl *gomock.Controller) *MockMinerAPI { - mock := &MockMinerAPI{ctrl: ctrl} - mock.recorder = &MockMinerAPIMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockMinerAPI) EXPECT() *MockMinerAPIMockRecorder { - return m.recorder -} - -// FetchUnsealedPiece mocks base method. -func (m *MockMinerAPI) FetchUnsealedPiece(arg0 context.Context, arg1 cid.Cid) (mount.Reader, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchUnsealedPiece", arg0, arg1) - ret0, _ := ret[0].(mount.Reader) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchUnsealedPiece indicates an expected call of FetchUnsealedPiece. 
-func (mr *MockMinerAPIMockRecorder) FetchUnsealedPiece(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockMinerAPI)(nil).FetchUnsealedPiece), arg0, arg1) -} - -// GetUnpaddedCARSize mocks base method. -func (m *MockMinerAPI) GetUnpaddedCARSize(arg0 context.Context, arg1 cid.Cid) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUnpaddedCARSize", arg0, arg1) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUnpaddedCARSize indicates an expected call of GetUnpaddedCARSize. -func (mr *MockMinerAPIMockRecorder) GetUnpaddedCARSize(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockMinerAPI)(nil).GetUnpaddedCARSize), arg0, arg1) -} - -// IsUnsealed mocks base method. -func (m *MockMinerAPI) IsUnsealed(arg0 context.Context, arg1 cid.Cid) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IsUnsealed indicates an expected call of IsUnsealed. -func (mr *MockMinerAPIMockRecorder) IsUnsealed(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockMinerAPI)(nil).IsUnsealed), arg0, arg1) -} - -// Start mocks base method. -func (m *MockMinerAPI) Start(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Start indicates an expected call of Start. 
-func (mr *MockMinerAPIMockRecorder) Start(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockMinerAPI)(nil).Start), arg0) -} diff --git a/markets/dagstore/mount.go b/markets/dagstore/mount.go deleted file mode 100644 index 0ecdc98082c..00000000000 --- a/markets/dagstore/mount.go +++ /dev/null @@ -1,91 +0,0 @@ -package dagstore - -import ( - "context" - "net/url" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/dagstore/mount" -) - -const lotusScheme = "lotus" - -var _ mount.Mount = (*LotusMount)(nil) - -// mountTemplate returns a templated LotusMount containing the supplied API. -// -// It is called when registering a mount type with the mount registry -// of the DAG store. It is used to reinstantiate mounts after a restart. -// -// When the registry needs to deserialize a mount it clones the template then -// calls Deserialize on the cloned instance, which will have a reference to the -// lotus mount API supplied here. -func mountTemplate(api MinerAPI) *LotusMount { - return &LotusMount{API: api} -} - -// LotusMount is a DAGStore mount implementation that fetches deal data -// from a PieceCID. 
-type LotusMount struct { - API MinerAPI - PieceCid cid.Cid -} - -func NewLotusMount(pieceCid cid.Cid, api MinerAPI) (*LotusMount, error) { - return &LotusMount{ - PieceCid: pieceCid, - API: api, - }, nil -} - -func (l *LotusMount) Serialize() *url.URL { - return &url.URL{ - Host: l.PieceCid.String(), - } -} - -func (l *LotusMount) Deserialize(u *url.URL) error { - pieceCid, err := cid.Decode(u.Host) - if err != nil { - return xerrors.Errorf("failed to parse PieceCid from host '%s': %w", u.Host, err) - } - l.PieceCid = pieceCid - return nil -} - -func (l *LotusMount) Fetch(ctx context.Context) (mount.Reader, error) { - return l.API.FetchUnsealedPiece(ctx, l.PieceCid) -} - -func (l *LotusMount) Info() mount.Info { - return mount.Info{ - Kind: mount.KindRemote, - AccessSequential: true, - AccessSeek: true, - AccessRandom: true, - } -} - -func (l *LotusMount) Close() error { - return nil -} - -func (l *LotusMount) Stat(ctx context.Context) (mount.Stat, error) { - size, err := l.API.GetUnpaddedCARSize(ctx, l.PieceCid) - if err != nil { - return mount.Stat{}, xerrors.Errorf("failed to fetch piece size for piece %s: %w", l.PieceCid, err) - } - isUnsealed, err := l.API.IsUnsealed(ctx, l.PieceCid) - if err != nil { - return mount.Stat{}, xerrors.Errorf("failed to verify if we have the unsealed piece %s: %w", l.PieceCid, err) - } - - // TODO Mark false when storage deal expires. 
- return mount.Stat{ - Exists: true, - Size: int64(size), - Ready: isUnsealed, - }, nil -} diff --git a/markets/dagstore/mount_test.go b/markets/dagstore/mount_test.go deleted file mode 100644 index d415f8d8856..00000000000 --- a/markets/dagstore/mount_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// stm: @unit -package dagstore - -import ( - "context" - "io" - "net/url" - "strings" - "testing" - - "github.com/golang/mock/gomock" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/dagstore/mount" - - mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks" -) - -func TestLotusMount(t *testing.T) { - //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001, @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001 - //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001 - ctx := context.Background() - bgen := blocksutil.NewBlockGenerator() - cid := bgen.Next().Cid() - - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. 
- defer mockCtrl.Finish() - - // create a mock lotus api that returns the reader we want - mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) - - mockLotusMountAPI.EXPECT().IsUnsealed(gomock.Any(), cid).Return(true, nil).Times(1) - - mr1 := struct { - io.ReadCloser - io.ReaderAt - io.Seeker - }{ - ReadCloser: io.NopCloser(strings.NewReader("testing")), - ReaderAt: nil, - Seeker: nil, - } - mr2 := struct { - io.ReadCloser - io.ReaderAt - io.Seeker - }{ - ReadCloser: io.NopCloser(strings.NewReader("testing")), - ReaderAt: nil, - Seeker: nil, - } - - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr1, nil).Times(1) - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr2, nil).Times(1) - mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1) - - mnt, err := NewLotusMount(cid, mockLotusMountAPI) - require.NoError(t, err) - info := mnt.Info() - require.Equal(t, info.Kind, mount.KindRemote) - - // fetch and assert success - rd, err := mnt.Fetch(context.Background()) - require.NoError(t, err) - - bz, err := io.ReadAll(rd) - require.NoError(t, err) - require.NoError(t, rd.Close()) - require.Equal(t, []byte("testing"), bz) - - stat, err := mnt.Stat(ctx) - require.NoError(t, err) - require.EqualValues(t, 100, stat.Size) - - // serialize url then deserialize from mount template -> should get back - // the same mount - url := mnt.Serialize() - mnt2 := mountTemplate(mockLotusMountAPI) - err = mnt2.Deserialize(url) - require.NoError(t, err) - - // fetching on this mount should get us back the same data. 
- rd, err = mnt2.Fetch(context.Background()) - require.NoError(t, err) - bz, err = io.ReadAll(rd) - require.NoError(t, err) - require.NoError(t, rd.Close()) - require.Equal(t, []byte("testing"), bz) -} - -func TestLotusMountDeserialize(t *testing.T) { - //stm: @MARKET_DAGSTORE_DESERIALIZE_CID_001 - api := &minerAPI{} - - bgen := blocksutil.NewBlockGenerator() - cid := bgen.Next().Cid() - - // success - us := lotusScheme + "://" + cid.String() - u, err := url.Parse(us) - require.NoError(t, err) - - mnt := mountTemplate(api) - err = mnt.Deserialize(u) - require.NoError(t, err) - - require.Equal(t, cid, mnt.PieceCid) - require.Equal(t, api, mnt.API) - - // fails if cid is not valid - us = lotusScheme + "://" + "rand" - u, err = url.Parse(us) - require.NoError(t, err) - err = mnt.Deserialize(u) - require.Error(t, err) - require.Contains(t, err.Error(), "failed to parse PieceCid") -} - -func TestLotusMountRegistration(t *testing.T) { - //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001, @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001 - //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001 - ctx := context.Background() - bgen := blocksutil.NewBlockGenerator() - cid := bgen.Next().Cid() - - // success - us := lotusScheme + "://" + cid.String() - u, err := url.Parse(us) - require.NoError(t, err) - - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. 
- defer mockCtrl.Finish() - - mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) - registry := mount.NewRegistry() - err = registry.Register(lotusScheme, mountTemplate(mockLotusMountAPI)) - require.NoError(t, err) - - mnt, err := registry.Instantiate(u) - require.NoError(t, err) - - mockLotusMountAPI.EXPECT().IsUnsealed(ctx, cid).Return(true, nil) - mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1) - stat, err := mnt.Stat(context.Background()) - require.NoError(t, err) - require.EqualValues(t, 100, stat.Size) - require.True(t, stat.Ready) -} diff --git a/markets/dagstore/wrapper.go b/markets/dagstore/wrapper.go deleted file mode 100644 index a929ad1fc93..00000000000 --- a/markets/dagstore/wrapper.go +++ /dev/null @@ -1,436 +0,0 @@ -package dagstore - -import ( - "context" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "sync" - "time" - - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - levelds "github.com/ipfs/go-ds-leveldb" - measure "github.com/ipfs/go-ds-measure" - logging "github.com/ipfs/go-log/v2" - carindex "github.com/ipld/go-car/v2/index" - "github.com/libp2p/go-libp2p/core/host" - ldbopts "github.com/syndtr/goleveldb/leveldb/opt" - "golang.org/x/xerrors" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/index" - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/dagstore/shard" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates" - "github.com/filecoin-project/go-fil-markets/stores" - "github.com/filecoin-project/go-statemachine/fsm" - - "github.com/filecoin-project/lotus/node/config" -) - -const ( - maxRecoverAttempts = 1 - shardRegMarker = ".shard-registration-complete" -) - -var log = logging.Logger("dagstore") - -type Wrapper struct { - ctx context.Context - cancel context.CancelFunc - backgroundWg sync.WaitGroup - - cfg 
config.DAGStoreConfig - dagst dagstore.Interface - minerAPI MinerAPI - failureCh chan dagstore.ShardResult - gcInterval time.Duration -} - -var _ stores.DAGStoreWrapper = (*Wrapper)(nil) - -func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*dagstore.DAGStore, *Wrapper, error) { - // construct the DAG Store. - registry := mount.NewRegistry() - if err := registry.Register(lotusScheme, mountTemplate(minerApi)); err != nil { - return nil, nil, xerrors.Errorf("failed to create registry: %w", err) - } - - // The dagstore will write Shard failures to the `failureCh` here. - failureCh := make(chan dagstore.ShardResult, 1) - - var ( - transientsDir = filepath.Join(cfg.RootDir, "transients") - datastoreDir = filepath.Join(cfg.RootDir, "datastore") - indexDir = filepath.Join(cfg.RootDir, "index") - ) - - dstore, err := newDatastore(datastoreDir) - if err != nil { - return nil, nil, xerrors.Errorf("failed to create dagstore datastore in %s: %w", datastoreDir, err) - } - - irepo, err := index.NewFSRepo(indexDir) - if err != nil { - return nil, nil, xerrors.Errorf("failed to initialise dagstore index repo: %w", err) - } - - topIndex := index.NewInverted(dstore) - dcfg := dagstore.Config{ - TransientsDir: transientsDir, - IndexRepo: irepo, - Datastore: dstore, - MountRegistry: registry, - FailureCh: failureCh, - TopLevelIndex: topIndex, - // not limiting fetches globally, as the Lotus mount does - // conditional throttling. 
- MaxConcurrentIndex: cfg.MaxConcurrentIndex, - MaxConcurrentReadyFetches: cfg.MaxConcurrentReadyFetches, - RecoverOnStart: dagstore.RecoverOnAcquire, - } - - dagst, err := dagstore.NewDAGStore(dcfg) - if err != nil { - return nil, nil, xerrors.Errorf("failed to create DAG store: %w", err) - } - - w := &Wrapper{ - cfg: cfg, - dagst: dagst, - minerAPI: minerApi, - failureCh: failureCh, - gcInterval: time.Duration(cfg.GCInterval), - } - - return dagst, w, nil -} - -// newDatastore creates a datastore under the given base directory -// for dagstore metadata. -func newDatastore(dir string) (ds.Batching, error) { - // Create the datastore directory if it doesn't exist yet. - if err := os.MkdirAll(dir, 0755); err != nil { - return nil, xerrors.Errorf("failed to create directory %s for DAG store datastore: %w", dir, err) - } - - // Create a new LevelDB datastore - dstore, err := levelds.NewDatastore(dir, &levelds.Options{ - Compression: ldbopts.NoCompression, - NoSync: false, - Strict: ldbopts.StrictAll, - ReadOnly: false, - }) - if err != nil { - return nil, xerrors.Errorf("failed to open datastore for DAG store: %w", err) - } - // Keep statistics about the datastore - mds := measure.New("measure.", dstore) - return mds, nil -} - -func (w *Wrapper) Start(ctx context.Context) error { - w.ctx, w.cancel = context.WithCancel(ctx) - - // Run a go-routine to do DagStore GC. 
- w.backgroundWg.Add(1) - go w.gcLoop() - - // Run a go-routine for shard recovery - if dss, ok := w.dagst.(*dagstore.DAGStore); ok { - w.backgroundWg.Add(1) - go dagstore.RecoverImmediately(w.ctx, dss, w.failureCh, maxRecoverAttempts, w.backgroundWg.Done) - } - - return w.dagst.Start(ctx) -} - -func (w *Wrapper) gcLoop() { - defer w.backgroundWg.Done() - - ticker := time.NewTicker(w.gcInterval) - defer ticker.Stop() - - for w.ctx.Err() == nil { - select { - // GC the DAG store on every tick - case <-ticker.C: - _, _ = w.dagst.GC(w.ctx) - - // Exit when the DAG store wrapper is shutdown - case <-w.ctx.Done(): - return - } - } -} - -func (w *Wrapper) LoadShard(ctx context.Context, pieceCid cid.Cid) (stores.ClosableBlockstore, error) { - log.Debugf("acquiring shard for piece CID %s", pieceCid) - - key := shard.KeyFromCID(pieceCid) - resch := make(chan dagstore.ShardResult, 1) - err := w.dagst.AcquireShard(ctx, key, resch, dagstore.AcquireOpts{}) - log.Debugf("sent message to acquire shard for piece CID %s", pieceCid) - - if err != nil { - if !errors.Is(err, dagstore.ErrShardUnknown) { - return nil, xerrors.Errorf("failed to schedule acquire shard for piece CID %s: %w", pieceCid, err) - } - - // if the DAGStore does not know about the Shard -> register it and then try to acquire it again. - log.Warnw("failed to load shard as shard is not registered, will re-register", "pieceCID", pieceCid) - // The path of a transient file that we can ask the DAG Store to use - // to perform the Indexing rather than fetching it via the Mount if - // we already have a transient file. However, we don't have it here - // and therefore we pass an empty file path. 
- carPath := "" - if err := stores.RegisterShardSync(ctx, w, pieceCid, carPath, false); err != nil { - return nil, xerrors.Errorf("failed to re-register shard during loading piece CID %s: %w", pieceCid, err) - } - log.Warnw("successfully re-registered shard", "pieceCID", pieceCid) - - resch = make(chan dagstore.ShardResult, 1) - if err := w.dagst.AcquireShard(ctx, key, resch, dagstore.AcquireOpts{}); err != nil { - return nil, xerrors.Errorf("failed to acquire Shard for piece CID %s after re-registering: %w", pieceCid, err) - } - } - - // TODO: The context is not yet being actively monitored by the DAG store, - // so we need to select against ctx.Done() until the following issue is - // implemented: - // https://github.com/filecoin-project/dagstore/issues/39 - var res dagstore.ShardResult - select { - case <-ctx.Done(): - return nil, ctx.Err() - case res = <-resch: - if res.Error != nil { - return nil, xerrors.Errorf("failed to acquire shard for piece CID %s: %w", pieceCid, res.Error) - } - } - - bs, err := res.Accessor.Blockstore() - if err != nil { - return nil, err - } - - log.Debugf("successfully loaded blockstore for piece CID %s", pieceCid) - return &Blockstore{ReadBlockstore: bs, Closer: res.Accessor}, nil -} - -func (w *Wrapper) RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error { - // Create a lotus mount with the piece CID - key := shard.KeyFromCID(pieceCid) - mt, err := NewLotusMount(pieceCid, w.minerAPI) - if err != nil { - return xerrors.Errorf("failed to create lotus mount for piece CID %s: %w", pieceCid, err) - } - - // Register the shard - opts := dagstore.RegisterOpts{ - ExistingTransient: carPath, - LazyInitialization: !eagerInit, - } - err = w.dagst.RegisterShard(ctx, key, mt, resch, opts) - if err != nil { - return xerrors.Errorf("failed to schedule register shard for piece CID %s: %w", pieceCid, err) - } - log.Debugf("successfully submitted Register Shard request for piece 
CID %s with eagerInit=%t", pieceCid, eagerInit) - - return nil -} - -func (w *Wrapper) DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error { - key := shard.KeyFromCID(pieceCid) - - opts := dagstore.DestroyOpts{} - - err := w.dagst.DestroyShard(ctx, key, resch, opts) - - if err != nil { - return xerrors.Errorf("failed to schedule destroy shard for piece CID %s: %w", pieceCid, err) - } - log.Debugf("successfully submitted destroy Shard request for piece CID %s", pieceCid) - - return nil - -} - -func (w *Wrapper) MigrateDeals(ctx context.Context, deals []storagemarket.MinerDeal) (bool, error) { - log := log.Named("migrator") - - // Check if all deals have already been registered as shards - isComplete, err := w.registrationComplete() - if err != nil { - return false, xerrors.Errorf("failed to get dagstore migration status: %w", err) - } - if isComplete { - // All deals have been registered as shards, bail out - log.Info("no shard migration necessary; already marked complete") - return false, nil - } - - log.Infow("registering shards for all active deals in sealing subsystem", "count", len(deals)) - - inSealingSubsystem := make(map[fsm.StateKey]struct{}, len(providerstates.StatesKnownBySealingSubsystem)) - for _, s := range providerstates.StatesKnownBySealingSubsystem { - inSealingSubsystem[s] = struct{}{} - } - - // channel where results will be received, and channel where the total - // number of registered shards will be sent. - resch := make(chan dagstore.ShardResult, 32) - totalCh := make(chan int) - doneCh := make(chan struct{}) - - // Start making progress consuming results. We won't know how many to - // actually consume until we register all shards. 
- // - // If there are any problems registering shards, just log an error - go func() { - defer close(doneCh) - - var total = math.MaxInt64 - var res dagstore.ShardResult - for rcvd := 0; rcvd < total; { - select { - case total = <-totalCh: - // we now know the total number of registered shards - // nullify so that we no longer consume from it after closed. - close(totalCh) - totalCh = nil - case res = <-resch: - rcvd++ - if res.Error == nil { - log.Infow("async shard registration completed successfully", "shard_key", res.Key) - } else { - log.Warnw("async shard registration failed", "shard_key", res.Key, "error", res.Error) - } - } - } - }() - - // Filter for deals that are handed off. - // - // If the deal has not yet been handed off to the sealing subsystem, we - // don't need to call RegisterShard in this migration; RegisterShard will - // be called in the new code once the deal reaches the state where it's - // handed off to the sealing subsystem. - var registered int - for _, deal := range deals { - pieceCid := deal.Proposal.PieceCID - - // enrich log statements in this iteration with deal ID and piece CID. - log := log.With("deal_id", deal.DealID, "piece_cid", pieceCid) - - // Filter for deals that have been handed off to the sealing subsystem - if _, ok := inSealingSubsystem[deal.State]; !ok { - log.Infow("deal not ready; skipping") - continue - } - - log.Infow("registering deal in dagstore with lazy init") - - // Register the deal as a shard with the DAG store with lazy initialization. - // The index will be populated the first time the deal is retrieved, or - // through the bulk initialization script. 
- err = w.RegisterShard(ctx, pieceCid, "", false, resch) - if err != nil { - log.Warnw("failed to register shard", "error", err) - continue - } - registered++ - } - - log.Infow("finished registering all shards", "total", registered) - totalCh <- registered - <-doneCh - - log.Infow("confirmed registration of all shards") - - // Completed registering all shards, so mark the migration as complete - err = w.markRegistrationComplete() - if err != nil { - log.Errorf("failed to mark shards as registered: %s", err) - } else { - log.Info("successfully marked migration as complete") - } - - log.Infow("dagstore migration complete") - - return true, nil -} - -// Check for the existence of a "marker" file indicating that the migration -// has completed -func (w *Wrapper) registrationComplete() (bool, error) { - path := filepath.Join(w.cfg.RootDir, shardRegMarker) - _, err := os.Stat(path) - if os.IsNotExist(err) { - return false, nil - } - if err != nil { - return false, err - } - return true, nil -} - -// Create a "marker" file indicating that the migration has completed -func (w *Wrapper) markRegistrationComplete() error { - path := filepath.Join(w.cfg.RootDir, shardRegMarker) - file, err := os.Create(path) - if err != nil { - return err - } - return file.Close() -} - -// Get all the pieces that contain a block -func (w *Wrapper) GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) { - // Pieces are stored as "shards" in the DAG store - shardKeys, err := w.dagst.ShardsContainingMultihash(w.ctx, blockCID.Hash()) - if err != nil { - return nil, xerrors.Errorf("getting pieces containing block %s: %w", blockCID, err) - } - - // Convert from shard key to cid - pieceCids := make([]cid.Cid, 0, len(shardKeys)) - for _, k := range shardKeys { - c, err := cid.Parse(k.String()) - if err != nil { - prefix := fmt.Sprintf("getting pieces containing block %s:", blockCID) - return nil, xerrors.Errorf("%s converting shard key %s to piece cid: %w", prefix, k, err) - } - - pieceCids = 
append(pieceCids, c) - } - - return pieceCids, nil -} - -func (w *Wrapper) GetIterableIndexForPiece(pieceCid cid.Cid) (carindex.IterableIndex, error) { - return w.dagst.GetIterableIndex(shard.KeyFromCID(pieceCid)) -} - -func (w *Wrapper) Close() error { - // Cancel the context - w.cancel() - - // Close the DAG store - log.Info("will close the dagstore") - if err := w.dagst.Close(); err != nil { - return xerrors.Errorf("failed to close dagstore: %w", err) - } - log.Info("dagstore closed") - - // Wait for the background go routine to exit - log.Info("waiting for dagstore background wrapper goroutines to exit") - w.backgroundWg.Wait() - log.Info("exited dagstore background wrapper goroutines") - - return nil -} diff --git a/markets/dagstore/wrapper_migration_test.go b/markets/dagstore/wrapper_migration_test.go deleted file mode 100644 index db2c9768b1a..00000000000 --- a/markets/dagstore/wrapper_migration_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// stm: #integration -package dagstore - -import ( - "context" - "io" - "testing" - - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - - "github.com/filecoin-project/lotus/node/config" -) - -func TestShardRegistration(t *testing.T) { - ps := tut.NewTestPieceStore() - sa := testnodes.NewTestSectorAccessor() - - ctx := context.Background() - cids := tut.GenerateCids(4) - pieceCidUnsealed := cids[0] - pieceCidSealed := cids[1] - pieceCidUnsealed2 := cids[2] - pieceCidUnsealed3 := cids[3] - - sealedSector := 
abi.SectorNumber(1) - unsealedSector1 := abi.SectorNumber(2) - unsealedSector2 := abi.SectorNumber(3) - unsealedSector3 := abi.SectorNumber(4) - - // ps.ExpectPiece(pieceCidUnsealed, piecestore.PieceInfo{ - // PieceCID: pieceCidUnsealed, - // Deals: []piecestore.DealInfo{ - // { - // SectorID: unsealedSector1, - // }, - // }, - // }) - // - // ps.ExpectPiece(pieceCidSealed, piecestore.PieceInfo{ - // PieceCID: pieceCidSealed, - // Deals: []piecestore.DealInfo{ - // { - // SectorID: sealedSector, - // }, - // }, - // }) - - deals := []storagemarket.MinerDeal{{ - // Should be registered - //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_001 - State: storagemarket.StorageDealSealing, - SectorNumber: unsealedSector1, - ClientDealProposal: markettypes.ClientDealProposal{ - Proposal: markettypes.DealProposal{ - PieceCID: pieceCidUnsealed, - }, - }, - }, { - // Should be registered with lazy registration (because sector is sealed) - State: storagemarket.StorageDealSealing, - SectorNumber: sealedSector, - ClientDealProposal: markettypes.ClientDealProposal{ - Proposal: markettypes.DealProposal{ - PieceCID: pieceCidSealed, - }, - }, - }, { - // Should be ignored because deal is no longer active - //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_003 - State: storagemarket.StorageDealError, - SectorNumber: unsealedSector2, - ClientDealProposal: markettypes.ClientDealProposal{ - Proposal: markettypes.DealProposal{ - PieceCID: pieceCidUnsealed2, - }, - }, - }, { - // Should be ignored because deal is not yet sealing - State: storagemarket.StorageDealFundsReserved, - SectorNumber: unsealedSector3, - ClientDealProposal: markettypes.ClientDealProposal{ - Proposal: markettypes.DealProposal{ - PieceCID: pieceCidUnsealed3, - }, - }, - }} - - cfg := config.DefaultStorageMiner().DAGStore - cfg.RootDir = t.TempDir() - - h, err := mocknet.New().GenPeer() - require.NoError(t, err) - - mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10, 5) - dagst, w, err := NewDAGStore(cfg, mapi, h) - require.NoError(t, err) - 
require.NotNil(t, dagst) - require.NotNil(t, w) - - err = dagst.Start(context.Background()) - require.NoError(t, err) - - migrated, err := w.MigrateDeals(ctx, deals) - require.True(t, migrated) - require.NoError(t, err) - - //stm: @MARKET_DAGSTORE_GET_ALL_SHARDS_001 - info := dagst.AllShardsInfo() - require.Len(t, info, 2) - for _, i := range info { - require.Equal(t, dagstore.ShardStateNew, i.ShardState) - } - - // Run register shard migration again - //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_002 - migrated, err = w.MigrateDeals(ctx, deals) - require.False(t, migrated) - require.NoError(t, err) - - // ps.VerifyExpectations(t) -} - -type wrappedSA struct { - retrievalmarket.SectorAccessor -} - -func (w *wrappedSA) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { - r, err := w.UnsealSector(ctx, sectorID, pieceOffset, length) - if err != nil { - return nil, err - } - return struct { - io.ReadCloser - io.Seeker - io.ReaderAt - }{ - ReadCloser: r, - Seeker: nil, - ReaderAt: nil, - }, err -} - -var _ SectorAccessor = &wrappedSA{} diff --git a/markets/dagstore/wrapper_test.go b/markets/dagstore/wrapper_test.go deleted file mode 100644 index f3b5e1b52c0..00000000000 --- a/markets/dagstore/wrapper_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// stm: #unit -package dagstore - -import ( - "bytes" - "context" - "os" - "testing" - "time" - - "github.com/ipfs/go-cid" - carindex "github.com/ipld/go-car/v2/index" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - mh "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/dagstore/shard" - - "github.com/filecoin-project/lotus/node/config" -) - -// TestWrapperAcquireRecovery verifies that if acquire shard returns a "not found" -// error, the wrapper will attempt to 
register the shard then reacquire -func TestWrapperAcquireRecoveryDestroy(t *testing.T) { - ctx := context.Background() - pieceCid, err := cid.Parse("bafkqaaa") - require.NoError(t, err) - - h, err := mocknet.New().GenPeer() - require.NoError(t, err) - // Create a DAG store wrapper - dagst, w, err := NewDAGStore(config.DAGStoreConfig{ - RootDir: t.TempDir(), - GCInterval: config.Duration(1 * time.Millisecond), - }, mockLotusMount{}, h) - require.NoError(t, err) - - defer dagst.Close() //nolint:errcheck - - // Return an error from acquire shard the first time - acquireShardErr := make(chan error, 1) - acquireShardErr <- xerrors.Errorf("unknown shard: %w", dagstore.ErrShardUnknown) - - // Create a mock DAG store in place of the real DAG store - mock := &mockDagStore{ - acquireShardErr: acquireShardErr, - acquireShardRes: dagstore.ShardResult{ - Accessor: getShardAccessor(t), - }, - register: make(chan shard.Key, 1), - destroy: make(chan shard.Key, 1), - } - w.dagst = mock - - //stm: @MARKET_DAGSTORE_ACQUIRE_SHARD_002 - mybs, err := w.LoadShard(ctx, pieceCid) - require.NoError(t, err) - - // Expect the wrapper to try to recover from the error returned from - // acquire shard by calling register shard with the same key - tctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - select { - case <-tctx.Done(): - require.Fail(t, "failed to call register") - case k := <-mock.register: - require.Equal(t, k.String(), pieceCid.String()) - } - - // Verify that we can get things from the acquired blockstore - var count int - ch, err := mybs.AllKeysChan(ctx) - require.NoError(t, err) - for range ch { - count++ - } - require.Greater(t, count, 0) - - // Destroy the shard - dr := make(chan dagstore.ShardResult, 1) - err = w.DestroyShard(ctx, pieceCid, dr) - require.NoError(t, err) - - dctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - select { - case <-dctx.Done(): - require.Fail(t, "failed to call destroy") - case k := <-mock.destroy: - 
require.Equal(t, k.String(), pieceCid.String()) - } - - var dcount int - dch, err := mybs.AllKeysChan(ctx) - require.NoError(t, err) - for range dch { - count++ - } - require.Equal(t, dcount, 0) -} - -// TestWrapperBackground verifies the behaviour of the background go routine -func TestWrapperBackground(t *testing.T) { - ctx := context.Background() - h, err := mocknet.New().GenPeer() - require.NoError(t, err) - - // Create a DAG store wrapper - dagst, w, err := NewDAGStore(config.DAGStoreConfig{ - RootDir: t.TempDir(), - GCInterval: config.Duration(1 * time.Millisecond), - }, mockLotusMount{}, h) - require.NoError(t, err) - - defer dagst.Close() //nolint:errcheck - - // Create a mock DAG store in place of the real DAG store - mock := &mockDagStore{ - gc: make(chan struct{}, 1), - recover: make(chan shard.Key, 1), - close: make(chan struct{}, 1), - } - w.dagst = mock - - // Start up the wrapper - //stm: @MARKET_DAGSTORE_START_001 - err = w.Start(ctx) - require.NoError(t, err) - - // Expect GC to be called automatically - //stm: @MARKET_DAGSTORE_START_002 - tctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - select { - case <-tctx.Done(): - require.Fail(t, "failed to call GC") - case <-mock.gc: - } - - // Expect that when the wrapper is closed it will call close on the - // DAG store - //stm: @MARKET_DAGSTORE_CLOSE_001 - err = w.Close() - require.NoError(t, err) - - tctx, cancel3 := context.WithTimeout(ctx, time.Second) - defer cancel3() - select { - case <-tctx.Done(): - require.Fail(t, "failed to call close") - case <-mock.close: - } -} - -type mockDagStore struct { - acquireShardErr chan error - acquireShardRes dagstore.ShardResult - register chan shard.Key - - gc chan struct{} - recover chan shard.Key - destroy chan shard.Key - close chan struct{} -} - -func (m *mockDagStore) GetIterableIndex(key shard.Key) (carindex.IterableIndex, error) { - return nil, nil -} - -func (m *mockDagStore) ShardsContainingMultihash(ctx context.Context, h 
mh.Multihash) ([]shard.Key, error) { - return nil, nil -} - -func (m *mockDagStore) GetShardKeysForCid(c cid.Cid) ([]shard.Key, error) { - panic("implement me") -} - -func (m *mockDagStore) DestroyShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.DestroyOpts) error { - m.destroy <- key - out <- dagstore.ShardResult{Key: key} - return nil -} - -func (m *mockDagStore) GetShardInfo(k shard.Key) (dagstore.ShardInfo, error) { - panic("implement me") -} - -func (m *mockDagStore) AllShardsInfo() dagstore.AllShardsInfo { - panic("implement me") -} - -func (m *mockDagStore) Start(_ context.Context) error { - return nil -} - -func (m *mockDagStore) RegisterShard(ctx context.Context, key shard.Key, mnt mount.Mount, out chan dagstore.ShardResult, opts dagstore.RegisterOpts) error { - m.register <- key - out <- dagstore.ShardResult{Key: key} - return nil -} - -func (m *mockDagStore) AcquireShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.AcquireOpts) error { - select { - case err := <-m.acquireShardErr: - return err - default: - } - - out <- m.acquireShardRes - return nil -} - -func (m *mockDagStore) RecoverShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.RecoverOpts) error { - m.recover <- key - return nil -} - -func (m *mockDagStore) GC(ctx context.Context) (*dagstore.GCResult, error) { - select { - case m.gc <- struct{}{}: - default: - } - - return nil, nil -} - -func (m *mockDagStore) Close() error { - m.close <- struct{}{} - return nil -} - -type mockLotusMount struct { -} - -func (m mockLotusMount) Start(ctx context.Context) error { - return nil -} - -func (m mockLotusMount) FetchUnsealedPiece(context.Context, cid.Cid) (mount.Reader, error) { - panic("implement me") -} - -func (m mockLotusMount) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) { - panic("implement me") -} - -func (m mockLotusMount) IsUnsealed(ctx context.Context, pieceCid 
cid.Cid) (bool, error) { - panic("implement me") -} - -func getShardAccessor(t *testing.T) *dagstore.ShardAccessor { - data, err := os.ReadFile("./fixtures/sample-rw-bs-v2.car") - require.NoError(t, err) - buff := bytes.NewReader(data) - reader := &mount.NopCloser{Reader: buff, ReaderAt: buff, Seeker: buff} - shardAccessor, err := dagstore.NewShardAccessor(reader, nil, nil) - require.NoError(t, err) - return shardAccessor -} diff --git a/markets/dealfilter/cli.go b/markets/dealfilter/cli.go deleted file mode 100644 index af832bfa08a..00000000000 --- a/markets/dealfilter/cli.go +++ /dev/null @@ -1,62 +0,0 @@ -package dealfilter - -import ( - "bytes" - "context" - "encoding/json" - "os/exec" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -func CliStorageDealFilter(cmd string) dtypes.StorageDealFilter { - return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { - d := struct { - storagemarket.MinerDeal - DealType string - }{ - MinerDeal: deal, - DealType: "storage", - } - return runDealFilter(ctx, cmd, d) - } -} - -func CliRetrievalDealFilter(cmd string) dtypes.RetrievalDealFilter { - return func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) { - d := struct { - retrievalmarket.ProviderDealState - DealType string - }{ - ProviderDealState: deal, - DealType: "retrieval", - } - return runDealFilter(ctx, cmd, d) - } -} - -func runDealFilter(ctx context.Context, cmd string, deal interface{}) (bool, string, error) { - j, err := json.MarshalIndent(deal, "", " ") - if err != nil { - return false, "", err - } - - var out bytes.Buffer - - c := exec.Command("sh", "-c", cmd) - c.Stdin = bytes.NewReader(j) - c.Stdout = &out - c.Stderr = &out - - switch err := c.Run().(type) { - case nil: - return true, "", nil - case *exec.ExitError: - return false, out.String(), nil - 
default: - return false, "filter cmd run error", err - } -} diff --git a/markets/idxprov/idxprov_test/noop.go b/markets/idxprov/idxprov_test/noop.go deleted file mode 100644 index 535c13d2522..00000000000 --- a/markets/idxprov/idxprov_test/noop.go +++ /dev/null @@ -1,16 +0,0 @@ -package idxprov_test - -import ( - "context" -) - -type NoopMeshCreator struct { -} - -func NewNoopMeshCreator() *NoopMeshCreator { - return &NoopMeshCreator{} -} - -func (mc NoopMeshCreator) Connect(ctx context.Context) error { - return nil -} diff --git a/markets/idxprov/mesh.go b/markets/idxprov/mesh.go deleted file mode 100644 index e69e213adab..00000000000 --- a/markets/idxprov/mesh.go +++ /dev/null @@ -1,59 +0,0 @@ -package idxprov - -import ( - "context" - "fmt" - - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/filecoin-project/lotus/api/v1api" -) - -var log = logging.Logger("idxprov") - -const protectTag = "index-provider-gossipsub" - -type MeshCreator interface { - Connect(ctx context.Context) error -} - -type Libp2pMeshCreator struct { - fullnodeApi v1api.FullNode - marketsHost host.Host -} - -func (mc Libp2pMeshCreator) Connect(ctx context.Context) error { - - // Add the markets host ID to list of daemon's protected peers first, before any attempt to - // connect to full node over libp2p. - marketsPeerID := mc.marketsHost.ID() - if err := mc.fullnodeApi.NetProtectAdd(ctx, []peer.ID{marketsPeerID}); err != nil { - return fmt.Errorf("failed to call NetProtectAdd on the full node, err: %w", err) - } - - faddrs, err := mc.fullnodeApi.NetAddrsListen(ctx) - if err != nil { - return fmt.Errorf("failed to fetch full node listen addrs, err: %w", err) - } - - // Connect from the full node, ask it to protect the connection and protect the connection on - // markets end too. 
Connection is initiated form full node to avoid the need to expose libp2p port on full node - if err := mc.fullnodeApi.NetConnect(ctx, peer.AddrInfo{ - ID: mc.marketsHost.ID(), - Addrs: mc.marketsHost.Addrs(), - }); err != nil { - return fmt.Errorf("failed to connect to index provider host from full node: %w", err) - } - mc.marketsHost.ConnManager().Protect(faddrs.ID, protectTag) - - log.Debugw("successfully connected to full node and asked it protect indexer provider peer conn", "fullNodeInfo", faddrs.String(), - "peerId", marketsPeerID) - - return nil -} - -func NewMeshCreator(fullnodeApi v1api.FullNode, marketsHost host.Host) MeshCreator { - return Libp2pMeshCreator{fullnodeApi, marketsHost} -} diff --git a/markets/journal.go b/markets/journal.go deleted file mode 100644 index 9c9c5be9c42..00000000000 --- a/markets/journal.go +++ /dev/null @@ -1,76 +0,0 @@ -package markets - -import ( - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - - "github.com/filecoin-project/lotus/journal" -) - -type StorageClientEvt struct { - Event string - Deal storagemarket.ClientDeal -} - -type StorageProviderEvt struct { - Event string - Deal storagemarket.MinerDeal -} - -type RetrievalClientEvt struct { - Event string - Deal retrievalmarket.ClientDealState -} - -type RetrievalProviderEvt struct { - Event string - Deal retrievalmarket.ProviderDealState -} - -// StorageClientJournaler records journal events from the storage client. -func StorageClientJournaler(j journal.Journal, evtType journal.EventType) func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - return func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - j.RecordEvent(evtType, func() interface{} { - return StorageClientEvt{ - Event: storagemarket.ClientEvents[event], - Deal: deal, - } - }) - } -} - -// StorageProviderJournaler records journal events from the storage provider. 
-func StorageProviderJournaler(j journal.Journal, evtType journal.EventType) func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - return func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - j.RecordEvent(evtType, func() interface{} { - return StorageProviderEvt{ - Event: storagemarket.ProviderEvents[event], - Deal: deal, - } - }) - } -} - -// RetrievalClientJournaler records journal events from the retrieval client. -func RetrievalClientJournaler(j journal.Journal, evtType journal.EventType) func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { - return func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { - j.RecordEvent(evtType, func() interface{} { - return RetrievalClientEvt{ - Event: retrievalmarket.ClientEvents[event], - Deal: deal, - } - }) - } -} - -// RetrievalProviderJournaler records journal events from the retrieval provider. -func RetrievalProviderJournaler(j journal.Journal, evtType journal.EventType) func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { - return func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { - j.RecordEvent(evtType, func() interface{} { - return RetrievalProviderEvt{ - Event: retrievalmarket.ProviderEvents[event], - Deal: deal, - } - }) - } -} diff --git a/markets/loggers/loggers.go b/markets/loggers/loggers.go deleted file mode 100644 index e066c984399..00000000000 --- a/markets/loggers/loggers.go +++ /dev/null @@ -1,76 +0,0 @@ -package marketevents - -import ( - logging "github.com/ipfs/go-log/v2" - - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" -) - -var log = logging.Logger("markets") - -// StorageClientLogger logs events from the storage client -func StorageClientLogger(event 
storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - log.Infow("storage client event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) -} - -// StorageProviderLogger logs events from the storage provider -func StorageProviderLogger(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - log.Infow("storage provider event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) -} - -// RetrievalClientLogger logs events from the retrieval client -func RetrievalClientLogger(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { - method := log.Infow - if event == retrievalmarket.ClientEventBlocksReceived { - method = log.Debugw - } - method("retrieval client event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) -} - -// RetrievalProviderLogger logs events from the retrieval provider -func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { - method := log.Infow - if event == retrievalmarket.ProviderEventBlockSent { - method = log.Debugw - } - method("retrieval provider event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) -} - -// DataTransferLogger logs events from the data transfer module -func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelState) { - log.Debugw("data transfer event", - "name", datatransfer.Events[event.Code], - "status", datatransfer.Statuses[state.Status()], - "transfer ID", state.TransferID(), - "channel ID", state.ChannelID(), - "sent", state.Sent(), - "received", state.Received(), - "queued", state.Queued(), - "received 
count", state.ReceivedCidsTotal(), - "total size", state.TotalSize(), - "remote peer", state.OtherPeer(), - "event message", event.Message, - "channel message", state.Message()) -} - -// ReadyLogger returns a function to log the results of module initialization -func ReadyLogger(module string) func(error) { - return func(err error) { - if err != nil { - log.Errorw("module initialization error", "module", module, "err", err) - } else { - log.Infow("module ready", "module", module) - } - } -} - -type RetrievalEvent struct { - Event retrievalmarket.ClientEvent - Status retrievalmarket.DealStatus - BytesReceived uint64 - FundsSpent abi.TokenAmount - Err string -} diff --git a/markets/pricing/cli.go b/markets/pricing/cli.go deleted file mode 100644 index 48f56628fae..00000000000 --- a/markets/pricing/cli.go +++ /dev/null @@ -1,50 +0,0 @@ -package pricing - -import ( - "bytes" - "context" - "encoding/json" - "os/exec" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -func ExternalRetrievalPricingFunc(cmd string) dtypes.RetrievalPricingFunc { - return func(ctx context.Context, pricingInput retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { - return runPricingFunc(ctx, cmd, pricingInput) - } -} - -func runPricingFunc(_ context.Context, cmd string, params interface{}) (retrievalmarket.Ask, error) { - j, err := json.Marshal(params) - if err != nil { - return retrievalmarket.Ask{}, err - } - - var out bytes.Buffer - var errb bytes.Buffer - - c := exec.Command("sh", "-c", cmd) - c.Stdin = bytes.NewReader(j) - c.Stdout = &out - c.Stderr = &errb - - switch err := c.Run().(type) { - case nil: - bz := out.Bytes() - resp := retrievalmarket.Ask{} - - if err := json.Unmarshal(bz, &resp); err != nil { - return resp, xerrors.Errorf("failed to parse pricing output %s, err=%w", string(bz), err) - } - return resp, nil - case *exec.ExitError: - return retrievalmarket.Ask{}, 
xerrors.Errorf("pricing func exited with error: %s", errb.String()) - default: - return retrievalmarket.Ask{}, xerrors.Errorf("pricing func cmd run error: %w", err) - } -} diff --git a/markets/retrievaladapter/client.go b/markets/retrievaladapter/client.go deleted file mode 100644 index 34bc2489624..00000000000 --- a/markets/retrievaladapter/client.go +++ /dev/null @@ -1,127 +0,0 @@ -package retrievaladapter - -import ( - "context" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-state-types/abi" - paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl/full" - payapi "github.com/filecoin-project/lotus/node/impl/paych" -) - -type retrievalClientNode struct { - forceOffChain bool - - chainAPI full.ChainAPI - payAPI payapi.PaychAPI - stateAPI full.StateAPI -} - -// NewRetrievalClientNode returns a new node adapter for a retrieval client that talks to the -// Lotus Node -func NewRetrievalClientNode(forceOffChain bool, payAPI payapi.PaychAPI, chainAPI full.ChainAPI, stateAPI full.StateAPI) retrievalmarket.RetrievalClientNode { - return &retrievalClientNode{ - forceOffChain: forceOffChain, - chainAPI: chainAPI, - payAPI: payAPI, - stateAPI: stateAPI, - } -} - -// GetOrCreatePaymentChannel sets up a new payment channel if one does not exist -// between a client and a miner and ensures the client has the given amount of -// funds available in the channel. 
-func (rcn *retrievalClientNode) GetOrCreatePaymentChannel(ctx context.Context, clientAddress address.Address, minerAddress address.Address, clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) { - // TODO: respect the provided TipSetToken (a serialized TipSetKey) when - // querying the chain - ci, err := rcn.payAPI.PaychGet(ctx, clientAddress, minerAddress, clientFundsAvailable, api.PaychGetOpts{ - OffChain: rcn.forceOffChain, - }) - if err != nil { - log.Errorw("paych get failed", "error", err) - return address.Undef, cid.Undef, err - } - - return ci.Channel, ci.WaitSentinel, nil -} - -// Allocate late creates a lane within a payment channel so that calls to -// CreatePaymentVoucher will automatically make vouchers only for the difference -// in total -func (rcn *retrievalClientNode) AllocateLane(ctx context.Context, paymentChannel address.Address) (uint64, error) { - return rcn.payAPI.PaychAllocateLane(ctx, paymentChannel) -} - -// CreatePaymentVoucher creates a new payment voucher in the given lane for a -// given payment channel so that all the payment vouchers in the lane add up -// to the given amount (so the payment voucher will be for the difference) -func (rcn *retrievalClientNode) CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, amount abi.TokenAmount, lane uint64, tok shared.TipSetToken) (*paychtypes.SignedVoucher, error) { - // TODO: respect the provided TipSetToken (a serialized TipSetKey) when - // querying the chain - voucher, err := rcn.payAPI.PaychVoucherCreate(ctx, paymentChannel, amount, lane) - if err != nil { - return nil, err - } - if voucher.Voucher == nil { - return nil, retrievalmarket.NewShortfallError(voucher.Shortfall) - } - return voucher.Voucher, nil -} - -func (rcn *retrievalClientNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := rcn.chainAPI.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return 
head.Key().Bytes(), head.Height(), nil -} - -func (rcn *retrievalClientNode) WaitForPaymentChannelReady(ctx context.Context, messageCID cid.Cid) (address.Address, error) { - return rcn.payAPI.PaychGetWaitReady(ctx, messageCID) -} - -func (rcn *retrievalClientNode) CheckAvailableFunds(ctx context.Context, paymentChannel address.Address) (retrievalmarket.ChannelAvailableFunds, error) { - - channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(ctx, paymentChannel) - if err != nil { - return retrievalmarket.ChannelAvailableFunds{}, err - } - return retrievalmarket.ChannelAvailableFunds{ - ConfirmedAmt: channelAvailableFunds.ConfirmedAmt, - PendingAmt: channelAvailableFunds.PendingAmt, - PendingWaitSentinel: channelAvailableFunds.PendingWaitSentinel, - QueuedAmt: channelAvailableFunds.QueuedAmt, - VoucherReedeemedAmt: channelAvailableFunds.VoucherReedeemedAmt, - }, nil -} - -func (rcn *retrievalClientNode) GetKnownAddresses(ctx context.Context, p retrievalmarket.RetrievalPeer, encodedTs shared.TipSetToken) ([]multiaddr.Multiaddr, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - mi, err := rcn.stateAPI.StateMinerInfo(ctx, p.Address, tsk) - if err != nil { - return nil, err - } - multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs)) - for _, a := range mi.Multiaddrs { - maddr, err := multiaddr.NewMultiaddrBytes(a) - if err != nil { - return nil, err - } - multiaddrs = append(multiaddrs, maddr) - } - - return multiaddrs, nil -} diff --git a/markets/retrievaladapter/client_blockstore.go b/markets/retrievaladapter/client_blockstore.go deleted file mode 100644 index 30fc5c73a5b..00000000000 --- a/markets/retrievaladapter/client_blockstore.go +++ /dev/null @@ -1,166 +0,0 @@ -package retrievaladapter - -import ( - "fmt" - "path/filepath" - "sync" - - bstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car/v2/blockstore" - "golang.org/x/xerrors" - - 
"github.com/filecoin-project/go-fil-markets/retrievalmarket" - - "github.com/filecoin-project/lotus/api" - lbstore "github.com/filecoin-project/lotus/blockstore" -) - -// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore. -// To be used in combination with IPFS integration. -type ProxyBlockstoreAccessor struct { - Blockstore bstore.Blockstore -} - -var _ retrievalmarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil) - -func NewFixedBlockstoreAccessor(bs bstore.Blockstore) retrievalmarket.BlockstoreAccessor { - return &ProxyBlockstoreAccessor{Blockstore: bs} -} - -func (p *ProxyBlockstoreAccessor) Get(_ retrievalmarket.DealID, _ retrievalmarket.PayloadCID) (bstore.Blockstore, error) { - return p.Blockstore, nil -} - -func (p *ProxyBlockstoreAccessor) Done(_ retrievalmarket.DealID) error { - return nil -} - -func NewAPIBlockstoreAdapter(sub retrievalmarket.BlockstoreAccessor) *APIBlockstoreAccessor { - return &APIBlockstoreAccessor{ - sub: sub, - retrStores: map[retrievalmarket.DealID]api.RemoteStoreID{}, - remoteStores: map[api.RemoteStoreID]bstore.Blockstore{}, - } -} - -// APIBlockstoreAccessor adds support to API-specified remote blockstores -type APIBlockstoreAccessor struct { - sub retrievalmarket.BlockstoreAccessor - - retrStores map[retrievalmarket.DealID]api.RemoteStoreID - remoteStores map[api.RemoteStoreID]bstore.Blockstore - - accessLk sync.Mutex -} - -func (a *APIBlockstoreAccessor) Get(id retrievalmarket.DealID, payloadCID retrievalmarket.PayloadCID) (bstore.Blockstore, error) { - a.accessLk.Lock() - defer a.accessLk.Unlock() - - as, has := a.retrStores[id] - if !has { - return a.sub.Get(id, payloadCID) - } - - return a.remoteStores[as], nil -} - -func (a *APIBlockstoreAccessor) Done(id retrievalmarket.DealID) error { - a.accessLk.Lock() - defer a.accessLk.Unlock() - - if _, has := a.retrStores[id]; has { - delete(a.retrStores, id) - return nil - } - return a.sub.Done(id) -} - -func (a *APIBlockstoreAccessor) 
RegisterDealToRetrievalStore(id retrievalmarket.DealID, sid api.RemoteStoreID) error { - a.accessLk.Lock() - defer a.accessLk.Unlock() - - if _, has := a.retrStores[id]; has { - return xerrors.Errorf("apistore for deal %d already registered", id) - } - if _, has := a.remoteStores[sid]; !has { - return xerrors.Errorf("remote store not found") - } - - a.retrStores[id] = sid - return nil -} - -func (a *APIBlockstoreAccessor) RegisterApiStore(sid api.RemoteStoreID, st *lbstore.NetworkStore) error { - a.accessLk.Lock() - defer a.accessLk.Unlock() - - if _, has := a.remoteStores[sid]; has { - return xerrors.Errorf("remote store already registered with this uuid") - } - - a.remoteStores[sid] = st - - st.OnClose(func() { - a.accessLk.Lock() - defer a.accessLk.Unlock() - - if _, has := a.remoteStores[sid]; has { - delete(a.remoteStores, sid) - } - }) - return nil -} - -var _ retrievalmarket.BlockstoreAccessor = &APIBlockstoreAccessor{} - -type CARBlockstoreAccessor struct { - rootdir string - lk sync.Mutex - open map[retrievalmarket.DealID]*blockstore.ReadWrite -} - -var _ retrievalmarket.BlockstoreAccessor = (*CARBlockstoreAccessor)(nil) - -func NewCARBlockstoreAccessor(rootdir string) *CARBlockstoreAccessor { - return &CARBlockstoreAccessor{ - rootdir: rootdir, - open: make(map[retrievalmarket.DealID]*blockstore.ReadWrite), - } -} - -func (c *CARBlockstoreAccessor) Get(id retrievalmarket.DealID, payloadCid retrievalmarket.PayloadCID) (bstore.Blockstore, error) { - c.lk.Lock() - defer c.lk.Unlock() - - bs, ok := c.open[id] - if ok { - return bs, nil - } - - path := c.PathFor(id) - bs, err := blockstore.OpenReadWrite(path, []cid.Cid{payloadCid}, blockstore.UseWholeCIDs(true)) - if err != nil { - return nil, err - } - c.open[id] = bs - return bs, nil -} - -func (c *CARBlockstoreAccessor) Done(id retrievalmarket.DealID) error { - c.lk.Lock() - defer c.lk.Unlock() - - bs, ok := c.open[id] - if !ok { - return nil - } - - delete(c.open, id) - return bs.Finalize() -} - -func (c 
*CARBlockstoreAccessor) PathFor(id retrievalmarket.DealID) string { - return filepath.Join(c.rootdir, fmt.Sprintf("%d.car", id)) -} diff --git a/markets/retrievaladapter/provider.go b/markets/retrievaladapter/provider.go deleted file mode 100644 index 453474d4e6e..00000000000 --- a/markets/retrievaladapter/provider.go +++ /dev/null @@ -1,108 +0,0 @@ -package retrievaladapter - -import ( - "context" - - "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-state-types/abi" - paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" - - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/types" -) - -var log = logging.Logger("retrievaladapter") - -type retrievalProviderNode struct { - full v1api.FullNode -} - -var _ retrievalmarket.RetrievalProviderNode = (*retrievalProviderNode)(nil) - -// NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the -// Lotus Node -func NewRetrievalProviderNode(full v1api.FullNode) retrievalmarket.RetrievalProviderNode { - return &retrievalProviderNode{full: full} -} - -func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return address.Undef, err - } - - mi, err := rpn.full.StateMinerInfo(ctx, miner, tsk) - return mi.Worker, err -} - -func (rpn *retrievalProviderNode) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paychtypes.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) { - // TODO: respect the provided TipSetToken (a serialized 
TipSetKey) when - // querying the chain - added, err := rpn.full.PaychVoucherAdd(ctx, paymentChannel, voucher, proof, expectedAmount) - return added, err -} - -func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := rpn.full.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return head.Key().Bytes(), head.Height(), nil -} - -// GetRetrievalPricingInput takes a set of candidate storage deals that can serve a retrieval request, -// and returns an minimally populated PricingInput. This PricingInput should be enhanced -// with more data, and passed to the pricing function to determine the final quoted price. -func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) { - resp := retrievalmarket.PricingInput{} - - head, err := rpn.full.ChainHead(ctx) - if err != nil { - return resp, xerrors.Errorf("failed to get chain head: %w", err) - } - tsk := head.Key() - - var mErr error - - for _, dealID := range storageDeals { - ds, err := rpn.full.StateMarketStorageDeal(ctx, dealID, tsk) - if err != nil { - log.Warnf("failed to look up deal %d on chain: err=%w", dealID, err) - mErr = multierror.Append(mErr, err) - continue - } - if ds.Proposal.VerifiedDeal { - resp.VerifiedDeal = true - } - - if ds.Proposal.PieceCID.Equals(pieceCID) { - resp.PieceSize = ds.Proposal.PieceSize.Unpadded() - } - - // If we've discovered a verified deal with the required PieceCID, we don't need - // to lookup more deals and we're done. - if resp.VerifiedDeal && resp.PieceSize != 0 { - break - } - } - - // Note: The piece size can never actually be zero. We only use it to here - // to assert that we didn't find a matching piece. 
- if resp.PieceSize == 0 { - if mErr == nil { - return resp, xerrors.New("failed to find matching piece") - } - - return resp, xerrors.Errorf("failed to fetch storage deal state: %w", mErr) - } - - return resp, nil -} diff --git a/markets/retrievaladapter/provider_test.go b/markets/retrievaladapter/provider_test.go deleted file mode 100644 index b7b5039d674..00000000000 --- a/markets/retrievaladapter/provider_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// stm: #unit -package retrievaladapter - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - testnet "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/mocks" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" -) - -func TestGetPricingInput(t *testing.T) { - //stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001 - ctx := context.Background() - tsk := &types.TipSet{} - key := tsk.Key() - - pcid := testnet.GenerateCids(1)[0] - deals := []abi.DealID{1, 2} - paddedSize := abi.PaddedPieceSize(128) - unpaddedSize := paddedSize.Unpadded() - - tcs := map[string]struct { - pieceCid cid.Cid - deals []abi.DealID - fFnc func(node *mocks.MockFullNode) - - expectedErrorStr string - expectedVerified bool - expectedPieceSize abi.UnpaddedPieceSize - }{ - "error when fails to fetch chain head": { - fFnc: func(n *mocks.MockFullNode) { - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, xerrors.New("chain head error")).Times(1) - }, - expectedErrorStr: "chain head error", - }, - - "error when no piece matches": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - }, - } - out2 := 
&api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedErrorStr: "failed to find matching piece", - }, - - "error when fails to fetch deal state": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - VerifiedDeal: true, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("error 1")), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, xerrors.New("error 2")), - ) - - }, - expectedErrorStr: "failed to fetch storage deal state", - }, - - "verified is true even if one deal is verified and we get the correct piecesize": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - VerifiedDeal: true, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedPieceSize: unpaddedSize, - expectedVerified: true, - }, - - "success even if one deal state fetch errors out but the other deal is verified and has the required piececid": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - 
Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - VerifiedDeal: true, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("some error")), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedPieceSize: unpaddedSize, - expectedVerified: true, - }, - - "verified is false if both deals are unverified and we get the correct piece size": { - fFnc: func(n *mocks.MockFullNode) { - out1 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: pcid, - PieceSize: paddedSize, - VerifiedDeal: false, - }, - } - out2 := &api.MarketDeal{ - Proposal: market.DealProposal{ - PieceCID: testnet.GenerateCids(1)[0], - VerifiedDeal: false, - }, - } - - n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) - gomock.InOrder( - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), - n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), - ) - - }, - expectedPieceSize: unpaddedSize, - expectedVerified: false, - }, - } - - for name, tc := range tcs { - tc := tc - t.Run(name, func(t *testing.T) { - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. 
- defer mockCtrl.Finish() - - mockFull := mocks.NewMockFullNode(mockCtrl) - rpn := &retrievalProviderNode{ - full: mockFull, - } - if tc.fFnc != nil { - tc.fFnc(mockFull) - } - - resp, err := rpn.GetRetrievalPricingInput(ctx, pcid, deals) - - if tc.expectedErrorStr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tc.expectedErrorStr) - require.Equal(t, retrievalmarket.PricingInput{}, resp) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedPieceSize, resp.PieceSize) - require.Equal(t, tc.expectedVerified, resp.VerifiedDeal) - } - }) - } -} diff --git a/markets/sectoraccessor/sectoraccessor.go b/markets/sectoraccessor/sectoraccessor.go deleted file mode 100644 index 9b709d3b5ff..00000000000 --- a/markets/sectoraccessor/sectoraccessor.go +++ /dev/null @@ -1,136 +0,0 @@ -package sectoraccessor - -import ( - "context" - "io" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/markets/dagstore" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/storiface" - "github.com/filecoin-project/lotus/storage/sectorblocks" -) - -var log = logging.Logger("sectoraccessor") - -type sectorAccessor struct { - maddr address.Address - secb sectorblocks.SectorBuilder - pp sealer.PieceProvider - full v1api.FullNode -} - -var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil) - -func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sealer.PieceProvider, full v1api.FullNode) 
dagstore.SectorAccessor { - return §orAccessor{address.Address(maddr), secb, pp, full} -} - -func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { - return sa.UnsealSectorAt(ctx, sectorID, pieceOffset, length) -} - -func (sa *sectorAccessor) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { - log.Debugf("get sector %d, pieceOffset %d, length %d", sectorID, pieceOffset, length) - si, err := sa.sectorsStatus(ctx, sectorID, false) - if err != nil { - return nil, err - } - - mid, err := address.IDFromAddress(sa.maddr) - if err != nil { - return nil, err - } - - ref := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(mid), - Number: sectorID, - }, - ProofType: si.SealProof, - } - - var commD cid.Cid - if si.CommD != nil { - commD = *si.CommD - } - - // Get a reader for the piece, unsealing the piece if necessary - log.Debugf("read piece in sector %d, pieceOffset %d, length %d from miner %d", sectorID, pieceOffset, length, mid) - r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(pieceOffset), length, si.Ticket.Value, commD) - if err != nil { - return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err) - } - _ = unsealed // todo: use - - return r, nil -} - -func (sa *sectorAccessor) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - si, err := sa.sectorsStatus(ctx, sectorID, true) - if err != nil { - return false, xerrors.Errorf("failed to get sector info: %w", err) - } - - mid, err := address.IDFromAddress(sa.maddr) - if err != nil { - return false, err - } - - ref := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(mid), - Number: sectorID, - }, - ProofType: si.SealProof, - } - - 
log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length) - return sa.pp.IsUnsealed(ctx, ref, storiface.UnpaddedByteIndex(offset), length) -} - -func (sa *sectorAccessor) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { - sInfo, err := sa.secb.SectorsStatus(ctx, sid, false) - if err != nil { - return api.SectorInfo{}, err - } - - if !showOnChainInfo { - return sInfo, nil - } - - onChainInfo, err := sa.full.StateSectorGetInfo(ctx, sa.maddr, sid, types.EmptyTSK) - if err != nil { - return sInfo, err - } - if onChainInfo == nil { - return sInfo, nil - } - sInfo.SealProof = onChainInfo.SealProof - sInfo.Activation = onChainInfo.Activation - sInfo.Expiration = onChainInfo.Expiration - sInfo.DealWeight = onChainInfo.DealWeight - sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight - sInfo.InitialPledge = onChainInfo.InitialPledge - - ex, err := sa.full.StateSectorExpiration(ctx, sa.maddr, sid, types.EmptyTSK) - if err != nil { - return sInfo, nil - } - sInfo.OnTime = ex.OnTime - sInfo.Early = ex.Early - - return sInfo, nil -} diff --git a/markets/storageadapter/api.go b/markets/storageadapter/api.go deleted file mode 100644 index b93ffdfbb16..00000000000 --- a/markets/storageadapter/api.go +++ /dev/null @@ -1,55 +0,0 @@ -package storageadapter - -import ( - "context" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" -) - -type apiWrapper struct { - api interface { - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) - ChainReadObj(context.Context, cid.Cid) ([]byte, error) - 
ChainHasObj(context.Context, cid.Cid) (bool, error) - ChainPutObj(context.Context, blocks.Block) error - } -} - -func (ca *apiWrapper) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { - store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(ca.api))) - - preAct, err := ca.api.StateGetActor(ctx, actor, pre) - if err != nil { - return nil, xerrors.Errorf("getting pre actor: %w", err) - } - curAct, err := ca.api.StateGetActor(ctx, actor, cur) - if err != nil { - return nil, xerrors.Errorf("getting cur actor: %w", err) - } - - preSt, err := miner.Load(store, preAct) - if err != nil { - return nil, xerrors.Errorf("loading miner actor: %w", err) - } - curSt, err := miner.Load(store, curAct) - if err != nil { - return nil, xerrors.Errorf("loading miner actor: %w", err) - } - - diff, err := miner.DiffPreCommits(preSt, curSt) - if err != nil { - return nil, xerrors.Errorf("diff precommits: %w", err) - } - - return diff, err -} diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go deleted file mode 100644 index eaff4e166a3..00000000000 --- a/markets/storageadapter/client.go +++ /dev/null @@ -1,446 +0,0 @@ -package storageadapter - -// this file implements storagemarket.StorageClientNode - -import ( - "bytes" - "context" - - "github.com/ipfs/go-cid" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" - - 
"github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/markets/utils" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/filecoin-project/lotus/node/modules/helpers" -) - -type ClientNodeAdapter struct { - *clientApi - - fundmgr *market.FundManager - ev *events.Events - dsMatcher *dealStateMatcher - scMgr *SectorCommittedManager -} - -type clientApi struct { - full.ChainAPI - full.StateAPI - full.MpoolAPI -} - -func NewClientNodeAdapter(mctx helpers.MetricsCtx, lc fx.Lifecycle, stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) (storagemarket.StorageClientNode, error) { - capi := &clientApi{chain, stateapi, mpool} - ctx := helpers.LifecycleCtx(mctx, lc) - - ev, err := events.NewEvents(ctx, capi) - if err != nil { - return nil, err - } - a := &ClientNodeAdapter{ - clientApi: capi, - - fundmgr: fundmgr, - ev: ev, - dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(capi))), - } - a.scMgr = NewSectorCommittedManager(ev, a, &apiWrapper{api: capi}) - return a, nil -} - -func (c *ClientNodeAdapter) ListStorageProviders(ctx context.Context, encodedTs shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - - addresses, err := c.StateListMiners(ctx, tsk) - if err != nil { - return nil, err - } - - var out []*storagemarket.StorageProviderInfo - - for _, addr := range addresses { - mi, err := c.GetMinerInfo(ctx, addr, encodedTs) - if err != nil { - return nil, err - } - - out = append(out, mi) - } - 
- return out, nil -} - -func (c *ClientNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte, encodedTs shared.TipSetToken) (bool, error) { - addr, err := c.StateAccountKey(ctx, addr, types.EmptyTSK) - if err != nil { - return false, err - } - - err = sigs.Verify(&sig, addr, input) - return err == nil, err -} - -// Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. -func (c *ClientNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - // (Provider Node API) - smsg, err := c.MpoolPushMessage(ctx, &types.Message{ - To: marketactor.Address, - From: addr, - Value: amount, - Method: builtin6.MethodsMarket.AddBalance, - }, nil) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil -} - -func (c *ClientNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { - return c.fundmgr.Reserve(ctx, wallet, addr, amt) -} - -func (c *ClientNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { - return c.fundmgr.Release(addr, amt) -} - -func (c *ClientNodeAdapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return storagemarket.Balance{}, err - } - - bal, err := c.StateMarketBalance(ctx, addr, tsk) - if err != nil { - return storagemarket.Balance{}, err - } - - return utils.ToSharedBalance(bal), nil -} - -// ValidatePublishedDeal validates that the provided deal has appeared on chain and references the same ClientDeal -// returns the Deal id if there is no error -// TODO: Don't return deal ID -func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) { - log.Infow("DEAL ACCEPTED!") - - pubmsg, err := 
c.ChainGetMessage(ctx, *deal.PublishMessage) - if err != nil { - return 0, xerrors.Errorf("getting deal publish message: %w", err) - } - - mi, err := c.StateMinerInfo(ctx, deal.Proposal.Provider, types.EmptyTSK) - if err != nil { - return 0, xerrors.Errorf("getting miner worker failed: %w", err) - } - - fromid, err := c.StateLookupID(ctx, pubmsg.From, types.EmptyTSK) - if err != nil { - return 0, xerrors.Errorf("failed to resolve from msg ID addr: %w", err) - } - - var pubOk bool - pubAddrs := append([]address.Address{mi.Worker, mi.Owner}, mi.ControlAddresses...) - for _, a := range pubAddrs { - if fromid == a { - pubOk = true - break - } - } - - if !pubOk { - return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s,%+v", pubmsg.From, deal.Proposal.Provider, pubAddrs) - } - - if pubmsg.To != marketactor.Address { - return 0, xerrors.Errorf("deal publish message wasn't set to StorageMarket actor (to=%s)", pubmsg.To) - } - - if pubmsg.Method != builtin6.MethodsMarket.PublishStorageDeals { - return 0, xerrors.Errorf("deal publish message called incorrect method (method=%s)", pubmsg.Method) - } - - var params markettypes.PublishStorageDealsParams - if err := params.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil { - return 0, err - } - - dealIdx := -1 - for i, storageDeal := range params.Deals { - // TODO: make it less hacky - sd := storageDeal - eq, err := cborutil.Equals(&deal.ClientDealProposal, &sd) - if err != nil { - return 0, err - } - if eq { - dealIdx = i - break - } - } - - if dealIdx == -1 { - return 0, xerrors.Errorf("deal publish didn't contain our deal (message cid: %s)", deal.PublishMessage) - } - - // TODO: timeout - ret, err := c.StateWaitMsg(ctx, *deal.PublishMessage, build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return 0, xerrors.Errorf("waiting for deal publish message: %w", err) - } - if ret.Receipt.ExitCode != 0 { - return 0, xerrors.Errorf("deal publish failed: exit=%d", 
ret.Receipt.ExitCode) - } - - nv, err := c.StateNetworkVersion(ctx, ret.TipSet) - if err != nil { - return 0, xerrors.Errorf("getting network version: %w", err) - } - - res, err := marketactor.DecodePublishStorageDealsReturn(ret.Receipt.Return, nv) - if err != nil { - return 0, xerrors.Errorf("decoding deal publish return: %w", err) - } - - dealIDs, err := res.DealIDs() - if err != nil { - return 0, xerrors.Errorf("getting dealIDs: %w", err) - } - - if dealIdx >= len(params.Deals) { - return 0, xerrors.Errorf( - "deal index %d out of bounds of deals (len %d) in publish deals message %s", - dealIdx, len(params.Deals), pubmsg.Cid()) - } - - valid, outIdx, err := res.IsDealValid(uint64(dealIdx)) - if err != nil { - return 0, xerrors.Errorf("determining deal validity: %w", err) - } - - if !valid { - return 0, xerrors.New("deal was invalid at publication") - } - - return dealIDs[outIdx], nil -} - -var clientOverestimation = struct { - numerator int64 - denominator int64 -}{ - numerator: 12, - denominator: 10, -} - -func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) { - bounds, err := c.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK) - if err != nil { - return abi.TokenAmount{}, abi.TokenAmount{}, err - } - - min := big.Mul(bounds.Min, big.NewInt(clientOverestimation.numerator)) - min = big.Div(min, big.NewInt(clientOverestimation.denominator)) - return min, bounds.Max, nil -} - -// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) -func (c *ClientNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { - return c.scMgr.OnDealSectorPreCommitted(ctx, provider, proposal, *publishCid, cb) -} - -// TODO: Remove dealID parameter, change publishCid to 
be cid.Cid (instead of pointer) -func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { - return c.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, *publishCid, cb) -} - -// TODO: Replace dealID parameter with DealProposal -func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { - head, err := c.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("client: failed to get chain head: %w", err) - } - - sd, err := c.StateMarketStorageDeal(ctx, dealID, head.Key()) - if err != nil { - return xerrors.Errorf("client: failed to look up deal %d on chain: %w", dealID, err) - } - - // Called immediately to check if the deal has already expired or been slashed - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - if ts == nil { - // keep listening for events - return false, true, nil - } - - // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts.Height() { - onDealExpired(nil) - return true, false, nil - } - - // If there is no deal assume it's already been slashed - if sd.State.SectorStartEpoch < 0 { - onDealSlashed(ts.Height(), nil) - return true, false, nil - } - - // No events have occurred yet, so return - // done: false, more: true (keep listening for events) - return false, true, nil - } - - // Called when there was a match against the state change we're looking for - // and the chain has advanced to the confidence height - stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { - // Check if the deal has already expired - if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { - 
onDealExpired(nil) - return false, nil - } - - // Timeout waiting for state change - if states == nil { - log.Error("timed out waiting for deal expiry") - return false, nil - } - - changedDeals, ok := states.(state.ChangedDeals) - if !ok { - panic("Expected state.ChangedDeals") - } - - deal, ok := changedDeals[dealID] - if !ok { - // No change to deal - return true, nil - } - - // Deal was slashed - if deal.To == nil { - onDealSlashed(ts2.Height(), nil) - return false, nil - } - - return true, nil - } - - // Called when there was a chain reorg and the state change was reverted - revert := func(ctx context.Context, ts *types.TipSet) error { - // TODO: Is it ok to just ignore this? - log.Warn("deal state reverted; TODO: actually handle this!") - return nil - } - - // Watch for state changes to the deal - match := c.dsMatcher.matcher(ctx, dealID) - - // Wait until after the end epoch for the deal and then timeout - timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 - if err := c.ev.StateChanged(checkFunc, stateChanged, revert, int(build.MessageConfidence)+1, timeout, match); err != nil { - return xerrors.Errorf("failed to set up state changed handler: %w", err) - } - - return nil -} - -func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Address, proposal markettypes.DealProposal) (*markettypes.ClientDealProposal, error) { - // TODO: output spec signed proposal - buf, err := cborutil.Dump(&proposal) - if err != nil { - return nil, err - } - - signer, err = c.StateAccountKey(ctx, signer, types.EmptyTSK) - if err != nil { - return nil, err - } - - sig, err := c.Wallet.WalletSign(ctx, signer, buf, api.MsgMeta{ - Type: api.MTDealProposal, - }) - if err != nil { - return nil, err - } - - return &markettypes.ClientDealProposal{ - Proposal: proposal, - ClientSignature: *sig, - }, nil -} - -func (c *ClientNodeAdapter) GetDefaultWalletAddress(ctx context.Context) (address.Address, error) { - addr, err := c.DefWallet.GetDefault() - return addr, err -} 
- -func (c *ClientNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := c.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return head.Key().Bytes(), head.Height(), nil -} - -func (c *ClientNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error { - receipt, err := c.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return cb(0, nil, cid.Undef, err) - } - return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil) -} - -func (c *ClientNodeAdapter) GetMinerInfo(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*storagemarket.StorageProviderInfo, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - mi, err := c.StateMinerInfo(ctx, addr, tsk) - if err != nil { - return nil, err - } - - out := utils.NewStorageProviderInfo(addr, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs) - return &out, nil -} - -func (c *ClientNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { - signer, err := c.StateAccountKey(ctx, signer, types.EmptyTSK) - if err != nil { - return nil, err - } - - localSignature, err := c.Wallet.WalletSign(ctx, signer, b, api.MsgMeta{ - Type: api.MTUnknown, // TODO: pass type here - }) - if err != nil { - return nil, err - } - return localSignature, nil -} - -var _ storagemarket.StorageClientNode = &ClientNodeAdapter{} diff --git a/markets/storageadapter/client_blockstore.go b/markets/storageadapter/client_blockstore.go deleted file mode 100644 index dc7e3f82a62..00000000000 --- a/markets/storageadapter/client_blockstore.go +++ /dev/null @@ -1,102 +0,0 @@ -package storageadapter - -import ( - "sync" - - blockstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - 
"github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/stores" - - "github.com/filecoin-project/lotus/node/repo/imports" -) - -// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore. -// To be used in combination with IPFS integration. -type ProxyBlockstoreAccessor struct { - Blockstore blockstore.Blockstore -} - -var _ storagemarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil) - -func NewFixedBlockstoreAccessor(bs blockstore.Blockstore) storagemarket.BlockstoreAccessor { - return &ProxyBlockstoreAccessor{Blockstore: bs} -} - -func (p *ProxyBlockstoreAccessor) Get(cid storagemarket.PayloadCID) (blockstore.Blockstore, error) { - return p.Blockstore, nil -} - -func (p *ProxyBlockstoreAccessor) Done(cid storagemarket.PayloadCID) error { - return nil -} - -// ImportsBlockstoreAccessor is a blockstore accessor backed by the -// imports.Manager. -type ImportsBlockstoreAccessor struct { - m *imports.Manager - lk sync.Mutex - open map[cid.Cid]struct { - st stores.ClosableBlockstore - refs int - } -} - -var _ storagemarket.BlockstoreAccessor = (*ImportsBlockstoreAccessor)(nil) - -func NewImportsBlockstoreAccessor(importmgr *imports.Manager) *ImportsBlockstoreAccessor { - return &ImportsBlockstoreAccessor{ - m: importmgr, - open: make(map[cid.Cid]struct { - st stores.ClosableBlockstore - refs int - }), - } -} - -func (s *ImportsBlockstoreAccessor) Get(payloadCID storagemarket.PayloadCID) (blockstore.Blockstore, error) { - s.lk.Lock() - defer s.lk.Unlock() - - e, ok := s.open[payloadCID] - if ok { - e.refs++ - return e.st, nil - } - - path, err := s.m.CARPathFor(payloadCID) - if err != nil { - return nil, xerrors.Errorf("failed to get client blockstore for root %s: %w", payloadCID, err) - } - if path == "" { - return nil, xerrors.Errorf("no client blockstore for root %s", payloadCID) - } - ret, err := stores.ReadOnlyFilestore(path) - if err != nil { - return nil, err - } - e.st = ret - 
s.open[payloadCID] = e - return ret, nil -} - -func (s *ImportsBlockstoreAccessor) Done(payloadCID storagemarket.PayloadCID) error { - s.lk.Lock() - defer s.lk.Unlock() - - e, ok := s.open[payloadCID] - if !ok { - return nil - } - - e.refs-- - if e.refs == 0 { - if err := e.st.Close(); err != nil { - log.Warnf("failed to close blockstore: %s", err) - } - delete(s.open, payloadCID) - } - return nil -} diff --git a/markets/storageadapter/dealpublisher.go b/markets/storageadapter/dealpublisher.go deleted file mode 100644 index 6a274e593f4..00000000000 --- a/markets/storageadapter/dealpublisher.go +++ /dev/null @@ -1,466 +0,0 @@ -package storageadapter - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/ipfs/go-cid" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -type dealPublisherAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - - WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletHas(context.Context, address.Address) (bool, error) - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateLookupID(context.Context, address.Address, 
types.TipSetKey) (address.Address, error) - StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) -} - -// DealPublisher batches deal publishing so that many deals can be included in -// a single publish message. This saves gas for miners that publish deals -// frequently. -// When a deal is submitted, the DealPublisher waits a configurable amount of -// time for other deals to be submitted before sending the publish message. -// There is a configurable maximum number of deals that can be included in one -// message. When the limit is reached the DealPublisher immediately submits a -// publish message with all deals in the queue. -type DealPublisher struct { - api dealPublisherAPI - as *ctladdr.AddressSelector - - ctx context.Context - Shutdown context.CancelFunc - - maxDealsPerPublishMsg uint64 - publishPeriod time.Duration - publishSpec *api.MessageSendSpec - - lk sync.Mutex - pending []*pendingDeal - cancelWaitForMoreDeals context.CancelFunc - publishPeriodStart time.Time - startEpochSealingBuffer abi.ChainEpoch -} - -// A deal that is queued to be published -type pendingDeal struct { - ctx context.Context - deal market.ClientDealProposal - Result chan publishResult -} - -// The result of publishing a deal -type publishResult struct { - msgCid cid.Cid - err error -} - -func newPendingDeal(ctx context.Context, deal market.ClientDealProposal) *pendingDeal { - return &pendingDeal{ - ctx: ctx, - deal: deal, - Result: make(chan publishResult), - } -} - -type PublishMsgConfig struct { - // The amount of time to wait for more deals to arrive before - // publishing - Period time.Duration - // The maximum number of deals to include in a single PublishStorageDeals - // message - MaxDealsPerMsg uint64 - // Minimum start epoch buffer to give time for sealing of sector with deal - StartEpochSealingBuffer uint64 -} - -func NewDealPublisher( - feeConfig *config.MinerFeeConfig, - publishMsgCfg PublishMsgConfig, -) func(lc fx.Lifecycle, full 
api.FullNode, as *ctladdr.AddressSelector) *DealPublisher { - return func(lc fx.Lifecycle, full api.FullNode, as *ctladdr.AddressSelector) *DealPublisher { - maxFee := abi.NewTokenAmount(0) - if feeConfig != nil { - maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee) - } - publishSpec := &api.MessageSendSpec{MaxFee: maxFee} - dp := newDealPublisher(full, as, publishMsgCfg, publishSpec) - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - dp.Shutdown() - return nil - }, - }) - return dp - } -} - -func newDealPublisher( - dpapi dealPublisherAPI, - as *ctladdr.AddressSelector, - publishMsgCfg PublishMsgConfig, - publishSpec *api.MessageSendSpec, -) *DealPublisher { - ctx, cancel := context.WithCancel(context.Background()) - return &DealPublisher{ - api: dpapi, - as: as, - ctx: ctx, - Shutdown: cancel, - maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg, - publishPeriod: publishMsgCfg.Period, - startEpochSealingBuffer: abi.ChainEpoch(publishMsgCfg.StartEpochSealingBuffer), - publishSpec: publishSpec, - } -} - -// PendingDeals returns the list of deals that are queued up to be published -func (p *DealPublisher) PendingDeals() api.PendingDealInfo { - p.lk.Lock() - defer p.lk.Unlock() - - // Filter out deals whose context has been cancelled - deals := make([]*pendingDeal, 0, len(p.pending)) - for _, dl := range p.pending { - if dl.ctx.Err() == nil { - deals = append(deals, dl) - } - } - - pending := make([]market.ClientDealProposal, len(deals)) - for i, deal := range deals { - pending[i] = deal.deal - } - - return api.PendingDealInfo{ - Deals: pending, - PublishPeriodStart: p.publishPeriodStart, - PublishPeriod: p.publishPeriod, - } -} - -// ForcePublishPendingDeals publishes all pending deals without waiting for -// the publish period to elapse -func (p *DealPublisher) ForcePublishPendingDeals() { - p.lk.Lock() - defer p.lk.Unlock() - - log.Infof("force publishing deals") - p.publishAllDeals() -} - -func (p *DealPublisher) Publish(ctx context.Context, 
deal market.ClientDealProposal) (cid.Cid, error) { - pdeal := newPendingDeal(ctx, deal) - - // Add the deal to the queue - p.processNewDeal(pdeal) - - // Wait for the deal to be submitted - select { - case <-ctx.Done(): - return cid.Undef, ctx.Err() - case res := <-pdeal.Result: - return res.msgCid, res.err - } -} - -func (p *DealPublisher) processNewDeal(pdeal *pendingDeal) { - p.lk.Lock() - defer p.lk.Unlock() - - // Filter out any cancelled deals - p.filterCancelledDeals() - - // If all deals have been cancelled, clear the wait-for-deals timer - if len(p.pending) == 0 && p.cancelWaitForMoreDeals != nil { - p.cancelWaitForMoreDeals() - p.cancelWaitForMoreDeals = nil - } - - // Make sure the new deal hasn't been cancelled - if pdeal.ctx.Err() != nil { - return - } - - pdealPropCid, err := pdeal.deal.Proposal.Cid() - if err != nil { - log.Warn("failed to calculate proposal CID for new pending Deal with piece cid %s", pdeal.deal.Proposal.PieceCID) - return - } - - // Sanity check that new deal isn't already in the queue - for _, pd := range p.pending { - pdPropCid, err := pd.deal.Proposal.Cid() - if err != nil { - log.Warn("failed to calculate proposal CID for pending Deal already in publish queue with piece cid %s", pd.deal.Proposal.PieceCID) - return - } - - if pdPropCid.Equals(pdealPropCid) { - log.Warn("tried to process new pending deal with piece CID %s that is already in publish queue; returning", pdeal.deal.Proposal.PieceCID) - return - } - } - - // Add the new deal to the queue - p.pending = append(p.pending, pdeal) - log.Infof("add deal with piece CID %s to publish deals queue - %d deals in queue (max queue size %d)", - pdeal.deal.Proposal.PieceCID, len(p.pending), p.maxDealsPerPublishMsg) - - // If the maximum number of deals per message has been reached or we're not batching, send a - // publish message - if uint64(len(p.pending)) >= p.maxDealsPerPublishMsg || p.publishPeriod == 0 { - log.Infof("publish deals queue has reached max size of %d, publishing 
deals", p.maxDealsPerPublishMsg) - p.publishAllDeals() - return - } - - // Otherwise wait for more deals to arrive or the timeout to be reached - p.waitForMoreDeals() -} - -func (p *DealPublisher) waitForMoreDeals() { - // Check if we're already waiting for deals - if !p.publishPeriodStart.IsZero() { - elapsed := build.Clock.Since(p.publishPeriodStart) - log.Infof("%s elapsed of / %s until publish deals queue is published", - elapsed, p.publishPeriod) - return - } - - // Set a timeout to wait for more deals to arrive - log.Infof("waiting publish deals queue period of %s before publishing", p.publishPeriod) - ctx, cancel := context.WithCancel(p.ctx) - - // Create the timer _before_ taking the current time so publishPeriod+timeout is always >= - // the actual timer timeout. - timer := build.Clock.Timer(p.publishPeriod) - - p.publishPeriodStart = build.Clock.Now() - p.cancelWaitForMoreDeals = cancel - - go func() { - select { - case <-ctx.Done(): - timer.Stop() - case <-timer.C: - p.lk.Lock() - defer p.lk.Unlock() - - // The timeout has expired so publish all pending deals - log.Infof("publish deals queue period of %s has expired, publishing deals", p.publishPeriod) - p.publishAllDeals() - } - }() -} - -func (p *DealPublisher) publishAllDeals() { - // If the timeout hasn't yet been cancelled, cancel it - if p.cancelWaitForMoreDeals != nil { - p.cancelWaitForMoreDeals() - p.cancelWaitForMoreDeals = nil - p.publishPeriodStart = time.Time{} - } - - // Filter out any deals that have been cancelled - p.filterCancelledDeals() - deals := p.pending - p.pending = nil - - // Send the publish message - go p.publishReady(deals) -} - -func (p *DealPublisher) publishReady(ready []*pendingDeal) { - if len(ready) == 0 { - return - } - - // onComplete is called when the publish message has been sent or there - // was an error - onComplete := func(pd *pendingDeal, msgCid cid.Cid, err error) { - // Send the publish result on the pending deal's Result channel - res := publishResult{ - 
msgCid: msgCid, - err: err, - } - select { - case <-p.ctx.Done(): - case <-pd.ctx.Done(): - case pd.Result <- res: - } - } - - // Validate each deal to make sure it can be published - validated := make([]*pendingDeal, 0, len(ready)) - deals := make([]market.ClientDealProposal, 0, len(ready)) - for _, pd := range ready { - // Validate the deal - if err := p.validateDeal(pd.deal); err != nil { - // Validation failed, complete immediately with an error - go onComplete(pd, cid.Undef, xerrors.Errorf("publish validation failed: %w", err)) - continue - } - - validated = append(validated, pd) - deals = append(deals, pd.deal) - } - - // Send the publish message - msgCid, err := p.publishDealProposals(deals) - - // Signal that each deal has been published - for _, pd := range validated { - go onComplete(pd, msgCid, err) - } -} - -// validateDeal checks that the deal proposal start epoch hasn't already -// elapsed -func (p *DealPublisher) validateDeal(deal market.ClientDealProposal) error { - start := time.Now() - - pcid, err := deal.Proposal.Cid() - if err != nil { - return xerrors.Errorf("computing proposal cid: %w", err) - } - - head, err := p.api.ChainHead(p.ctx) - if err != nil { - return err - } - if head.Height()+p.startEpochSealingBuffer > deal.Proposal.StartEpoch { - return xerrors.Errorf( - "cannot publish deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", - deal.Proposal.PieceCID, head.Height(), deal.Proposal.StartEpoch) - } - - mi, err := p.api.StateMinerInfo(p.ctx, deal.Proposal.Provider, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting provider info: %w", err) - } - - params, err := actors.SerializeParams(&market.PublishStorageDealsParams{ - Deals: []market.ClientDealProposal{deal}, - }) - if err != nil { - return xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) - } - - addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero()) - if err != nil { - return 
xerrors.Errorf("selecting address for publishing deals: %w", err) - } - - res, err := p.api.StateCall(p.ctx, &types.Message{ - To: builtin.StorageMarketActorAddr, - From: addr, - Value: types.NewInt(0), - Method: builtin.MethodsMarket.PublishStorageDeals, - Params: params, - }, head.Key()) - if err != nil { - return xerrors.Errorf("simulating deal publish message: %w", err) - } - if res.MsgRct.ExitCode != exitcode.Ok { - return xerrors.Errorf("simulating deal publish message: non-zero exitcode %s; message: %s", res.MsgRct.ExitCode, res.Error) - } - - took := time.Now().Sub(start) - log.Infow("validating deal", "took", took, "proposal", pcid) - - return nil -} - -// Sends the publish message -func (p *DealPublisher) publishDealProposals(deals []market.ClientDealProposal) (cid.Cid, error) { - if len(deals) == 0 { - return cid.Undef, nil - } - - log.Infof("publishing %d deals in publish deals queue with piece CIDs: %s", len(deals), pieceCids(deals)) - - provider := deals[0].Proposal.Provider - for _, dl := range deals { - if dl.Proposal.Provider != provider { - msg := fmt.Sprintf("publishing %d deals failed: ", len(deals)) + - "not all deals are for same provider: " + - fmt.Sprintf("deal with piece CID %s is for provider %s ", deals[0].Proposal.PieceCID, deals[0].Proposal.Provider) + - fmt.Sprintf("but deal with piece CID %s is for provider %s", dl.Proposal.PieceCID, dl.Proposal.Provider) - return cid.Undef, xerrors.Errorf(msg) - } - } - - mi, err := p.api.StateMinerInfo(p.ctx, provider, types.EmptyTSK) - if err != nil { - return cid.Undef, err - } - - params, err := actors.SerializeParams(&market.PublishStorageDealsParams{ - Deals: deals, - }) - - if err != nil { - return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) - } - - addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero()) - if err != nil { - return cid.Undef, xerrors.Errorf("selecting address for publishing deals: %w", err) - } - - 
smsg, err := p.api.MpoolPushMessage(p.ctx, &types.Message{ - To: builtin.StorageMarketActorAddr, - From: addr, - Value: types.NewInt(0), - Method: builtin.MethodsMarket.PublishStorageDeals, - Params: params, - }, p.publishSpec) - - if err != nil { - return cid.Undef, err - } - return smsg.Cid(), nil -} - -func pieceCids(deals []market.ClientDealProposal) string { - cids := make([]string, 0, len(deals)) - for _, dl := range deals { - cids = append(cids, dl.Proposal.PieceCID.String()) - } - return strings.Join(cids, ", ") -} - -// filter out deals that have been cancelled -func (p *DealPublisher) filterCancelledDeals() { - filtered := p.pending[:0] - for _, pd := range p.pending { - if pd.ctx.Err() != nil { - continue - } - filtered = append(filtered, pd) - } - p.pending = filtered -} diff --git a/markets/storageadapter/dealpublisher_test.go b/markets/storageadapter/dealpublisher_test.go deleted file mode 100644 index 35169bf41b9..00000000000 --- a/markets/storageadapter/dealpublisher_test.go +++ /dev/null @@ -1,423 +0,0 @@ -// stm: #unit -package storageadapter - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/ipfs/go-cid" - "github.com/raulk/clock" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" -) - -func TestDealPublisher(t *testing.T) { - //stm: @MARKET_DEAL_PUBLISHER_PUBLISH_001, @MARKET_DEAL_PUBLISHER_GET_PENDING_DEALS_001 - oldClock := build.Clock - t.Cleanup(func() { 
build.Clock = oldClock }) - mc := clock.NewMock() - build.Clock = mc - - testCases := []struct { - name string - publishPeriod time.Duration - maxDealsPerMsg uint64 - dealCountWithinPublishPeriod int - ctxCancelledWithinPublishPeriod int - expiredDeals int - dealCountAfterPublishPeriod int - expectedDealsPerMsg []int - failOne bool - }{{ - name: "publish one deal within publish period", - publishPeriod: 10 * time.Millisecond, - maxDealsPerMsg: 5, - dealCountWithinPublishPeriod: 1, - dealCountAfterPublishPeriod: 0, - expectedDealsPerMsg: []int{1}, - }, { - name: "publish two deals within publish period", - publishPeriod: 10 * time.Millisecond, - maxDealsPerMsg: 5, - dealCountWithinPublishPeriod: 2, - dealCountAfterPublishPeriod: 0, - expectedDealsPerMsg: []int{2}, - }, { - name: "publish one deal within publish period, and one after", - publishPeriod: 10 * time.Millisecond, - maxDealsPerMsg: 5, - dealCountWithinPublishPeriod: 1, - dealCountAfterPublishPeriod: 1, - expectedDealsPerMsg: []int{1, 1}, - }, { - name: "publish deals that exceed max deals per message within publish period, and one after", - publishPeriod: 10 * time.Millisecond, - maxDealsPerMsg: 2, - dealCountWithinPublishPeriod: 3, - dealCountAfterPublishPeriod: 1, - expectedDealsPerMsg: []int{2, 1, 1}, - }, { - name: "ignore deals with cancelled context", - publishPeriod: 10 * time.Millisecond, - maxDealsPerMsg: 5, - dealCountWithinPublishPeriod: 2, - ctxCancelledWithinPublishPeriod: 2, - dealCountAfterPublishPeriod: 1, - expectedDealsPerMsg: []int{2, 1}, - }, { - name: "ignore expired deals", - publishPeriod: 10 * time.Millisecond, - maxDealsPerMsg: 5, - dealCountWithinPublishPeriod: 2, - expiredDeals: 2, - dealCountAfterPublishPeriod: 1, - expectedDealsPerMsg: []int{2, 1}, - }, { - name: "zero config", - publishPeriod: 0, - maxDealsPerMsg: 0, - dealCountWithinPublishPeriod: 2, - ctxCancelledWithinPublishPeriod: 0, - dealCountAfterPublishPeriod: 2, - expectedDealsPerMsg: []int{1, 1, 1, 1}, - }, { - 
name: "one deal failing doesn't fail the entire batch", - publishPeriod: 10 * time.Millisecond, - maxDealsPerMsg: 5, - dealCountWithinPublishPeriod: 2, - dealCountAfterPublishPeriod: 0, - failOne: true, - expectedDealsPerMsg: []int{1}, - }} - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - mc.Set(time.Now()) - dpapi := newDPAPI(t) - - // Create a deal publisher - dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ - Period: tc.publishPeriod, - MaxDealsPerMsg: tc.maxDealsPerMsg, - }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) - - // Keep a record of the deals that were submitted to be published - var dealsToPublish []markettypes.ClientDealProposal - - // Publish deals within publish period - for i := 0; i < tc.dealCountWithinPublishPeriod; i++ { - if tc.failOne && i == 1 { - publishDeal(t, dp, i, false, false) - } else { - deal := publishDeal(t, dp, 0, false, false) - dealsToPublish = append(dealsToPublish, deal) - } - } - for i := 0; i < tc.ctxCancelledWithinPublishPeriod; i++ { - publishDeal(t, dp, 0, true, false) - } - for i := 0; i < tc.expiredDeals; i++ { - publishDeal(t, dp, 0, false, true) - } - - // Wait until publish period has elapsed - if tc.publishPeriod > 0 { - // If we expect deals to get stuck in the queue, wait until that happens - if tc.maxDealsPerMsg != 0 && tc.dealCountWithinPublishPeriod%int(tc.maxDealsPerMsg) != 0 { - require.Eventually(t, func() bool { - dp.lk.Lock() - defer dp.lk.Unlock() - return !dp.publishPeriodStart.IsZero() - }, time.Second, time.Millisecond, "failed to queue deals") - } - - // Then wait to send - require.Eventually(t, func() bool { - dp.lk.Lock() - defer dp.lk.Unlock() - - // Advance if necessary. 
- if mc.Since(dp.publishPeriodStart) <= tc.publishPeriod { - dp.lk.Unlock() - mc.Set(dp.publishPeriodStart.Add(tc.publishPeriod + 1)) - dp.lk.Lock() - } - - return len(dp.pending) == 0 - }, time.Second, time.Millisecond, "failed to send pending messages") - } - - // Publish deals after publish period - for i := 0; i < tc.dealCountAfterPublishPeriod; i++ { - deal := publishDeal(t, dp, 0, false, false) - dealsToPublish = append(dealsToPublish, deal) - } - - if tc.publishPeriod > 0 && tc.dealCountAfterPublishPeriod > 0 { - require.Eventually(t, func() bool { - dp.lk.Lock() - defer dp.lk.Unlock() - if mc.Since(dp.publishPeriodStart) <= tc.publishPeriod { - dp.lk.Unlock() - mc.Set(dp.publishPeriodStart.Add(tc.publishPeriod + 1)) - dp.lk.Lock() - } - return len(dp.pending) == 0 - }, time.Second, time.Millisecond, "failed to send pending messages") - } - - checkPublishedDeals(t, dpapi, dealsToPublish, tc.expectedDealsPerMsg) - }) - } -} - -func TestForcePublish(t *testing.T) { - //stm: @MARKET_DEAL_PUBLISHER_PUBLISH_001, @MARKET_DEAL_PUBLISHER_GET_PENDING_DEALS_001 - //stm: @MARKET_DEAL_PUBLISHER_FORCE_PUBLISH_ALL_001 - dpapi := newDPAPI(t) - - // Create a deal publisher - start := build.Clock.Now() - publishPeriod := time.Hour - dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ - Period: publishPeriod, - MaxDealsPerMsg: 10, - }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) - - // Queue three deals for publishing, one with a cancelled context - var dealsToPublish []markettypes.ClientDealProposal - // 1. Regular deal - deal := publishDeal(t, dp, 0, false, false) - dealsToPublish = append(dealsToPublish, deal) - // 2. Deal with cancelled context - publishDeal(t, dp, 0, true, false) - // 3. 
Regular deal - deal = publishDeal(t, dp, 0, false, false) - dealsToPublish = append(dealsToPublish, deal) - - // Allow a moment for them to be queued - build.Clock.Sleep(10 * time.Millisecond) - - // Should be two deals in the pending deals list - // (deal with cancelled context is ignored) - pendingInfo := dp.PendingDeals() - require.Len(t, pendingInfo.Deals, 2) - require.Equal(t, publishPeriod, pendingInfo.PublishPeriod) - require.True(t, pendingInfo.PublishPeriodStart.After(start)) - require.True(t, pendingInfo.PublishPeriodStart.Before(build.Clock.Now())) - - // Force publish all pending deals - dp.ForcePublishPendingDeals() - - // Should be no pending deals - pendingInfo = dp.PendingDeals() - require.Len(t, pendingInfo.Deals, 0) - - // Make sure the expected deals were published - checkPublishedDeals(t, dpapi, dealsToPublish, []int{2}) -} - -func publishDeal(t *testing.T, dp *DealPublisher, invalid int, ctxCancelled bool, expired bool) markettypes.ClientDealProposal { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - pctx := ctx - if ctxCancelled { - pctx, cancel = context.WithCancel(ctx) - cancel() - } - - startEpoch := abi.ChainEpoch(20) - if expired { - startEpoch = abi.ChainEpoch(5) - } - deal := markettypes.ClientDealProposal{ - Proposal: markettypes.DealProposal{ - PieceCID: generateCids(1)[0], - Client: getClientActor(t), - Provider: getProviderActor(t), - StartEpoch: startEpoch, - EndEpoch: abi.ChainEpoch(120), - PieceSize: abi.PaddedPieceSize(invalid), // pass invalid into StateCall below - }, - ClientSignature: crypto.Signature{ - Type: crypto.SigTypeSecp256k1, - Data: []byte("signature data"), - }, - } - - go func() { - _, err := dp.Publish(pctx, deal) - - // If the test has completed just bail out without checking for errors - if ctx.Err() != nil { - return - } - - if ctxCancelled || expired || invalid == 1 { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }() - - return deal -} - -func 
checkPublishedDeals(t *testing.T, dpapi *dpAPI, dealsToPublish []markettypes.ClientDealProposal, expectedDealsPerMsg []int) { - // For each message that was expected to be sent - var publishedDeals []markettypes.ClientDealProposal - for _, expectedDealsInMsg := range expectedDealsPerMsg { - // Should have called StateMinerInfo with the provider address - stateMinerInfoAddr := <-dpapi.stateMinerInfoCalls - require.Equal(t, getProviderActor(t), stateMinerInfoAddr) - - // Check the fields of the message that was sent - msg := <-dpapi.pushedMsgs - require.Equal(t, getWorkerActor(t), msg.From) - require.Equal(t, market.Address, msg.To) - require.Equal(t, market.Methods.PublishStorageDeals, msg.Method) - - // Check that the expected number of deals was included in the message - var params markettypes.PublishStorageDealsParams - err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)) - require.NoError(t, err) - require.Len(t, params.Deals, expectedDealsInMsg) - - // Keep track of the deals that were sent - for _, d := range params.Deals { - publishedDeals = append(publishedDeals, d) - } - } - - // Verify that all deals that were submitted to be published were - // sent out (we do this by ensuring all the piece CIDs are present) - require.True(t, matchPieceCids(publishedDeals, dealsToPublish)) -} - -func matchPieceCids(sent []markettypes.ClientDealProposal, exp []markettypes.ClientDealProposal) bool { - cidsA := dealPieceCids(sent) - cidsB := dealPieceCids(exp) - - if len(cidsA) != len(cidsB) { - return false - } - - s1 := cid.NewSet() - for _, c := range cidsA { - s1.Add(c) - } - - for _, c := range cidsB { - if !s1.Has(c) { - return false - } - } - - return true -} - -func dealPieceCids(deals []markettypes.ClientDealProposal) []cid.Cid { - cids := make([]cid.Cid, 0, len(deals)) - for _, dl := range deals { - cids = append(cids, dl.Proposal.PieceCID) - } - return cids -} - -type dpAPI struct { - t *testing.T - worker address.Address - - stateMinerInfoCalls chan 
address.Address - pushedMsgs chan *types.Message -} - -func newDPAPI(t *testing.T) *dpAPI { - return &dpAPI{ - t: t, - worker: getWorkerActor(t), - stateMinerInfoCalls: make(chan address.Address, 128), - pushedMsgs: make(chan *types.Message, 128), - } -} - -func (d *dpAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { - dummyCid, err := cid.Parse("bafkqaaa") - require.NoError(d.t, err) - return types.NewTipSet([]*types.BlockHeader{{ - Miner: tutils.NewActorAddr(d.t, "miner"), - Height: abi.ChainEpoch(10), - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - }}) -} - -func (d *dpAPI) StateMinerInfo(ctx context.Context, address address.Address, key types.TipSetKey) (api.MinerInfo, error) { - d.stateMinerInfoCalls <- address - return api.MinerInfo{Worker: d.worker}, nil -} - -func (d *dpAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { - d.pushedMsgs <- msg - return &types.SignedMessage{Message: *msg}, nil -} - -func (d *dpAPI) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) { - panic("don't call me") -} - -func (d *dpAPI) WalletHas(ctx context.Context, a address.Address) (bool, error) { - panic("don't call me") -} - -func (d *dpAPI) StateAccountKey(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { - panic("don't call me") -} - -func (d *dpAPI) StateLookupID(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { - panic("don't call me") -} - -func (d *dpAPI) StateCall(ctx context.Context, message *types.Message, key types.TipSetKey) (*api.InvocResult, error) { - var p markettypes.PublishStorageDealsParams - if err := p.UnmarshalCBOR(bytes.NewReader(message.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal market params: %w", err) 
- } - - exit := exitcode.Ok - if p.Deals[0].Proposal.PieceSize == 1 { - exit = exitcode.ErrIllegalState - } - return &api.InvocResult{MsgRct: &types.MessageReceipt{ExitCode: exit}}, nil -} - -func getClientActor(t *testing.T) address.Address { - return tutils.NewActorAddr(t, "client") -} - -func getWorkerActor(t *testing.T) address.Address { - return tutils.NewActorAddr(t, "worker") -} - -func getProviderActor(t *testing.T) address.Address { - return tutils.NewActorAddr(t, "provider") -} diff --git a/markets/storageadapter/dealstatematcher.go b/markets/storageadapter/dealstatematcher.go deleted file mode 100644 index 8d5598eae01..00000000000 --- a/markets/storageadapter/dealstatematcher.go +++ /dev/null @@ -1,85 +0,0 @@ -package storageadapter - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-state-types/abi" - - actorsmarket "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/types" -) - -// dealStateMatcher caches the DealStates for the most recent -// old/new tipset combination -type dealStateMatcher struct { - preds *state.StatePredicates - - lk sync.Mutex - oldTsk types.TipSetKey - newTsk types.TipSetKey - oldDealStateRoot actorsmarket.DealStates - newDealStateRoot actorsmarket.DealStates -} - -func newDealStateMatcher(preds *state.StatePredicates) *dealStateMatcher { - return &dealStateMatcher{preds: preds} -} - -// matcher returns a function that checks if the state of the given dealID -// has changed. -// It caches the DealStates for the most recent old/new tipset combination. 
-func (mc *dealStateMatcher) matcher(ctx context.Context, dealID abi.DealID) events.StateMatchFunc { - // The function that is called to check if the deal state has changed for - // the target deal ID - dealStateChangedForID := mc.preds.DealStateChangedForIDs([]abi.DealID{dealID}) - - // The match function is called by the events API to check if there's - // been a state change for the deal with the target deal ID - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - mc.lk.Lock() - defer mc.lk.Unlock() - - // Check if we've already fetched the DealStates for the given tipsets - if mc.oldTsk == oldTs.Key() && mc.newTsk == newTs.Key() { - // If we fetch the DealStates and there is no difference between - // them, they are stored as nil. So we can just bail out. - if mc.oldDealStateRoot == nil || mc.newDealStateRoot == nil { - return false, nil, nil - } - - // Check if the deal state has changed for the target ID - return dealStateChangedForID(ctx, mc.oldDealStateRoot, mc.newDealStateRoot) - } - - // We haven't already fetched the DealStates for the given tipsets, so - // do so now - - // Replace dealStateChangedForID with a function that records the - // DealStates so that we can cache them - var oldDealStateRootSaved, newDealStateRootSaved actorsmarket.DealStates - recorder := func(ctx context.Context, oldDealStateRoot, newDealStateRoot actorsmarket.DealStates) (changed bool, user state.UserData, err error) { - // Record DealStates - oldDealStateRootSaved = oldDealStateRoot - newDealStateRootSaved = newDealStateRoot - - return dealStateChangedForID(ctx, oldDealStateRoot, newDealStateRoot) - } - - // Call the match function - dealDiff := mc.preds.OnStorageMarketActorChanged( - mc.preds.OnDealStateChanged(recorder)) - matched, data, err := dealDiff(ctx, oldTs.Key(), newTs.Key()) - - // Save the recorded DealStates for the tipsets - mc.oldTsk = oldTs.Key() - mc.newTsk = newTs.Key() - mc.oldDealStateRoot = oldDealStateRootSaved - 
mc.newDealStateRoot = newDealStateRootSaved - - return matched, data, err - } - return match -} diff --git a/markets/storageadapter/dealstatematcher_test.go b/markets/storageadapter/dealstatematcher_test.go deleted file mode 100644 index 9a46e4af917..00000000000 --- a/markets/storageadapter/dealstatematcher_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// stm: #unit -package storageadapter - -import ( - "context" - "testing" - - "github.com/ipfs/go-cid" - cbornode "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" - - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - test "github.com/filecoin-project/lotus/chain/events/state/mock" - "github.com/filecoin-project/lotus/chain/types" -) - -func TestDealStateMatcher(t *testing.T) { - //stm: @CHAIN_STATE_GET_ACTOR_001 - ctx := context.Background() - bs := bstore.NewMemorySync() - store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) - - deal1 := &market2.DealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - } - deal2 := &market2.DealState{ - SectorStartEpoch: 4, - LastUpdatedEpoch: 5, - } - deal3 := &market2.DealState{ - SectorStartEpoch: 7, - LastUpdatedEpoch: 8, - } - deals1 := map[abi.DealID]*market2.DealState{ - abi.DealID(1): deal1, - } - deals2 := map[abi.DealID]*market2.DealState{ - abi.DealID(1): deal2, - } - deals3 := map[abi.DealID]*market2.DealState{ - abi.DealID(1): deal3, - } - - deal1StateC := createMarketState(ctx, t, store, deals1) - deal2StateC := createMarketState(ctx, t, store, deals2) - deal3StateC := createMarketState(ctx, t, 
store, deals3) - - minerAddr, err := address.NewFromString("t00") - require.NoError(t, err) - ts1, err := test.MockTipset(minerAddr, 1) - require.NoError(t, err) - ts2, err := test.MockTipset(minerAddr, 2) - require.NoError(t, err) - ts3, err := test.MockTipset(minerAddr, 3) - require.NoError(t, err) - - api := test.NewMockAPI(bs) - api.SetActor(ts1.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal1StateC}) - api.SetActor(ts2.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal2StateC}) - api.SetActor(ts3.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal3StateC}) - - t.Run("caching", func(t *testing.T) { - dsm := newDealStateMatcher(state.NewStatePredicates(api)) - matcher := dsm.matcher(ctx, abi.DealID(1)) - - // Call matcher with tipsets that have the same state - ok, stateChange, err := matcher(ts1, ts1) - require.NoError(t, err) - require.False(t, ok) - require.Nil(t, stateChange) - // Should call StateGetActor once for each tipset - require.Equal(t, 2, api.StateGetActorCallCount()) - - // Call matcher with tipsets that have different state - api.ResetCallCounts() - ok, stateChange, err = matcher(ts1, ts2) - require.NoError(t, err) - require.True(t, ok) - require.NotNil(t, stateChange) - // Should call StateGetActor once for each tipset - require.Equal(t, 2, api.StateGetActorCallCount()) - - // Call matcher again with the same tipsets as above, should be cached - api.ResetCallCounts() - ok, stateChange, err = matcher(ts1, ts2) - require.NoError(t, err) - require.True(t, ok) - require.NotNil(t, stateChange) - // Should not call StateGetActor (because it should hit the cache) - require.Equal(t, 0, api.StateGetActorCallCount()) - - // Call matcher with different tipsets, should not be cached - api.ResetCallCounts() - ok, stateChange, err = matcher(ts2, ts3) - require.NoError(t, err) - require.True(t, ok) - require.NotNil(t, stateChange) - // Should call StateGetActor once for each tipset - 
require.Equal(t, 2, api.StateGetActorCallCount()) - }) - - t.Run("parallel", func(t *testing.T) { - api.ResetCallCounts() - dsm := newDealStateMatcher(state.NewStatePredicates(api)) - matcher := dsm.matcher(ctx, abi.DealID(1)) - - // Call matcher with lots of go-routines in parallel - var eg errgroup.Group - res := make([]struct { - ok bool - stateChange events.StateChange - }, 20) - for i := 0; i < len(res); i++ { - i := i - eg.Go(func() error { - ok, stateChange, err := matcher(ts1, ts2) - res[i].ok = ok - res[i].stateChange = stateChange - return err - }) - } - err := eg.Wait() - require.NoError(t, err) - - // All go-routines should have got the same (cached) result - for i := 1; i < len(res); i++ { - require.Equal(t, res[i].ok, res[i-1].ok) - require.Equal(t, res[i].stateChange, res[i-1].stateChange) - } - - // Only one go-routine should have called StateGetActor - // (once for each tipset) - require.Equal(t, 2, api.StateGetActorCallCount()) - }) -} - -func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState) cid.Cid { - dealRootCid := test.CreateDealAMT(ctx, t, store, deals) - state := test.CreateEmptyMarketState(t, store) - state.States = dealRootCid - - stateC, err := store.Put(ctx, state) - require.NoError(t, err) - return stateC -} diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go deleted file mode 100644 index 54ddb73b334..00000000000 --- a/markets/storageadapter/ondealsectorcommitted.go +++ /dev/null @@ -1,418 +0,0 @@ -package storageadapter - -import ( - "bytes" - "context" - "sync" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - miner2 
"github.com/filecoin-project/go-state-types/builtin/v11/miner" - "github.com/filecoin-project/go-state-types/builtin/v8/miner" - "github.com/filecoin-project/go-state-types/builtin/v9/market" - - "github.com/filecoin-project/lotus/build" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/types" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" -) - -type eventsCalledAPI interface { - Called(ctx context.Context, check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error -} - -type dealInfoAPI interface { - GetCurrentDealInfo(ctx context.Context, tsk types.TipSetKey, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, error) -} - -type diffPreCommitsAPI interface { - diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*lminer.PreCommitChanges, error) -} - -type SectorCommittedManager struct { - ev eventsCalledAPI - dealInfo dealInfoAPI - dpc diffPreCommitsAPI -} - -func NewSectorCommittedManager(ev eventsCalledAPI, tskAPI pipeline.CurrentDealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { - dim := &pipeline.CurrentDealInfoManager{ - CDAPI: tskAPI, - } - return newSectorCommittedManager(ev, dim, dpcAPI) -} - -func newSectorCommittedManager(ev eventsCalledAPI, dealInfo dealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { - return &SectorCommittedManager{ - ev: ev, - dealInfo: dealInfo, - dpc: dpcAPI, - } -} - -func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorPreCommittedCallback) error { - // Ensure callback is only called once - var once sync.Once - cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) { - once.Do(func() 
{ - callback(sectorNumber, isActive, err) - }) - } - - // First check if the deal is already active, and if so, bail out - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - dealInfo, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid) - if err != nil { - // Note: the error returned from here will end up being returned - // from OnDealSectorPreCommitted so no need to call the callback - // with the error - return false, false, xerrors.Errorf("failed to check deal activity: %w", err) - } - - if isActive { - // Deal is already active, bail out - cb(0, true, nil) - return true, false, nil - } - - // Check that precommits which landed between when the deal was published - // and now don't already contain the deal we care about. - // (this can happen when the precommit lands vary quickly (in tests), or - // when the client node was down after the deal was published, and when - // the precommit containing it landed on chain) - - diff, err := mgr.dpc.diffPreCommits(ctx, provider, dealInfo.PublishMsgTipSet, ts.Key()) - if err != nil { - return false, false, xerrors.Errorf("failed to diff precommits: %w", err) - } - - for _, info := range diff.Added { - for _, d := range info.Info.DealIDs { - if d == dealInfo.DealID { - cb(info.Info.SectorNumber, false, nil) - return true, false, nil - } - } - } - - // Not yet active, start matching against incoming messages - return false, true, nil - } - - // Watch for a pre-commit message to the provider. 
- matchEvent := func(msg *types.Message) (bool, error) { - matched := msg.To == provider && (msg.Method == builtin.MethodsMiner.PreCommitSector || - msg.Method == builtin.MethodsMiner.PreCommitSectorBatch || - msg.Method == builtin.MethodsMiner.PreCommitSectorBatch2 || - msg.Method == builtin.MethodsMiner.ProveReplicaUpdates) - return matched, nil - } - - // The deal must be accepted by the deal proposal start epoch, so timeout - // if the chain reaches that epoch - timeoutEpoch := proposal.StartEpoch + 1 - - // Check if the message params included the deal ID we're looking for. - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(0, false, xerrors.Errorf("handling applied event: %w", err)) - } - }() - - // If the deal hasn't been activated by the proposed start epoch, the - // deal will timeout (when msg == nil it means the timeout epoch was reached) - if msg == nil { - err = xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch) - return false, err - } - - // Ignore the pre-commit message if it was not executed successfully - if rec.ExitCode != 0 { - return true, nil - } - - // When there is a reorg, the deal ID may change, so get the - // current deal ID from the publish message CID - res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), &proposal, publishCid) - if err != nil { - return false, xerrors.Errorf("failed to get dealinfo: %w", err) - } - - // If this is a replica update method that succeeded the deal is active - if msg.Method == builtin.MethodsMiner.ProveReplicaUpdates { - sn, err := dealSectorInReplicaUpdateSuccess(msg, rec, res) - if err != nil { - return false, err - } - if sn != nil { - cb(*sn, true, nil) - return false, nil - } - // Didn't find the deal ID in this message, so keep looking - return true, nil - } - - // Extract the message parameters - sn, 
err := dealSectorInPreCommitMsg(msg, res) - if err != nil { - return false, xerrors.Errorf("failed to extract message params: %w", err) - } - - if sn != nil { - cb(*sn, false, nil) - } - - // Didn't find the deal ID in this message, so keep looking - return true, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal pre-commit reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? - return nil - } - - if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: %w", err) - } - - return nil -} - -func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, provider address.Address, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorCommittedCallback) error { - // Ensure callback is only called once - var once sync.Once - cb := func(err error) { - once.Do(func() { - callback(err) - }) - } - - // First check if the deal is already active, and if so, bail out - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - _, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid) - if err != nil { - // Note: the error returned from here will end up being returned - // from OnDealSectorCommitted so no need to call the callback - // with the error - return false, false, err - } - - if isActive { - // Deal is already active, bail out - cb(nil) - return true, false, nil - } - - // Not yet active, start matching against incoming messages - return false, true, nil - } - - // Match a prove-commit sent to the provider with the given sector number - matchEvent := func(msg *types.Message) (matched bool, err error) { - if msg.To != provider { - return false, nil - } - - return sectorInCommitMsg(msg, sectorNumber) - } - - // The deal must be accepted by the deal 
proposal start epoch, so timeout - // if the chain reaches that epoch - timeoutEpoch := proposal.StartEpoch + 1 - - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(xerrors.Errorf("handling applied event: %w", err)) - } - }() - - // If the deal hasn't been activated by the proposed start epoch, the - // deal will timeout (when msg == nil it means the timeout epoch was reached) - if msg == nil { - err := xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch) - return false, err - } - - // Ignore the prove-commit message if it was not executed successfully - if rec.ExitCode != 0 { - return true, nil - } - - // Get the deal info - res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), &proposal, publishCid) - if err != nil { - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - // Make sure the deal is active - if res.MarketDeal.State.SectorStartEpoch < 1 { - return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", res.DealID, ts.ParentState(), ts.Height()) - } - - log.Infof("Storage deal %d activated at epoch %d", res.DealID, res.MarketDeal.State.SectorStartEpoch) - - cb(nil) - - return false, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal activation reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? 
- return nil - } - - if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: %w", err) - } - - return nil -} - -func dealSectorInReplicaUpdateSuccess(msg *types.Message, rec *types.MessageReceipt, res pipeline.CurrentDealInfo) (*abi.SectorNumber, error) { - var params miner.ProveReplicaUpdatesParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal prove replica update: %w", err) - } - - var seekUpdate miner.ReplicaUpdate - var found bool - for _, update := range params.Updates { - for _, did := range update.Deals { - if did == res.DealID { - seekUpdate = update - found = true - break - } - } - } - if !found { - return nil, nil - } - - // check that this update passed validation steps - var successBf bitfield.BitField - if err := successBf.UnmarshalCBOR(bytes.NewReader(rec.Return)); err != nil { - return nil, xerrors.Errorf("unmarshal return value: %w", err) - } - success, err := successBf.IsSet(uint64(seekUpdate.SectorID)) - if err != nil { - return nil, xerrors.Errorf("failed to check success of replica update: %w", err) - } - if !success { - return nil, xerrors.Errorf("replica update %d failed", seekUpdate.SectorID) - } - return &seekUpdate.SectorID, nil -} - -// dealSectorInPreCommitMsg tries to find a sector containing the specified deal -func dealSectorInPreCommitMsg(msg *types.Message, res pipeline.CurrentDealInfo) (*abi.SectorNumber, error) { - switch msg.Method { - case builtin.MethodsMiner.PreCommitSector: - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - // Check through the deal IDs associated with this message - for _, did := range params.DealIDs { - if did == res.DealID { - // Found the deal ID in this message. 
Callback with the sector ID. - return ¶ms.SectorNumber, nil - } - } - case builtin.MethodsMiner.PreCommitSectorBatch: - var params miner.PreCommitSectorBatchParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - for _, precommit := range params.Sectors { - // Check through the deal IDs associated with this message - for _, did := range precommit.DealIDs { - if did == res.DealID { - // Found the deal ID in this message. Callback with the sector ID. - return &precommit.SectorNumber, nil - } - } - } - case builtin.MethodsMiner.PreCommitSectorBatch2: - var params miner2.PreCommitSectorBatchParams2 - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return nil, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - for _, precommit := range params.Sectors { - // Check through the deal IDs associated with this message - for _, did := range precommit.DealIDs { - if did == res.DealID { - // Found the deal ID in this message. Callback with the sector ID. 
- return &precommit.SectorNumber, nil - } - } - } - default: - return nil, xerrors.Errorf("unexpected method %d", msg.Method) - } - - return nil, nil -} - -// sectorInCommitMsg checks if the provided message commits specified sector -func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) { - switch msg.Method { - case builtin.MethodsMiner.ProveCommitSector: - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - return params.SectorNumber == sectorNumber, nil - - case builtin.MethodsMiner.ProveCommitAggregate: - var params miner.ProveCommitAggregateParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - set, err := params.SectorNumbers.IsSet(uint64(sectorNumber)) - if err != nil { - return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err) - } - - return set, nil - - default: - return false, nil - } -} - -func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, bool, error) { - res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), proposal, publishCid) - if err != nil { - // TODO: This may be fine for some errors - return res, false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - // Sector was slashed - if res.MarketDeal.State.SlashEpoch > 0 { - return res, false, xerrors.Errorf("deal %d was slashed at epoch %d", res.DealID, res.MarketDeal.State.SlashEpoch) - } - - // Sector with deal is already active - if res.MarketDeal.State.SectorStartEpoch > 0 { - return res, true, nil - } - - return res, false, nil -} diff --git 
a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go deleted file mode 100644 index e3d3187809a..00000000000 --- a/markets/storageadapter/ondealsectorcommitted_test.go +++ /dev/null @@ -1,583 +0,0 @@ -// stm: #unit -package storageadapter - -import ( - "bytes" - "context" - "errors" - "fmt" - "math/rand" - "testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/cbor" - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/events" - test "github.com/filecoin-project/lotus/chain/events/state/mock" - "github.com/filecoin-project/lotus/chain/types" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" -) - -func TestOnDealSectorPreCommitted(t *testing.T) { - label, err := markettypes.NewLabelFromString("success") - require.NoError(t, err) - - provider := address.TestAddress - ctx := context.Background() - publishCid := generateCids(1)[0] - sealedCid := generateCids(1)[0] - pieceCid := generateCids(1)[0] - dealID := abi.DealID(rand.Uint64()) - sectorNumber := abi.SectorNumber(rand.Uint64()) - proposal := market.DealProposal{ - PieceCID: pieceCid, - PieceSize: abi.PaddedPieceSize(rand.Uint64()), - Client: tutils.NewActorAddr(t, "client"), - Provider: tutils.NewActorAddr(t, "provider"), - StoragePricePerEpoch: abi.NewTokenAmount(1), - 
ProviderCollateral: abi.NewTokenAmount(1), - ClientCollateral: abi.NewTokenAmount(1), - Label: label, - } - unfinishedDeal := &api.MarketDeal{ - Proposal: proposal, - State: api.MarketDealState{ - SectorStartEpoch: -1, - LastUpdatedEpoch: 2, - }, - } - activeDeal := &api.MarketDeal{ - Proposal: proposal, - State: api.MarketDealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - }, - } - slashedDeal := &api.MarketDeal{ - Proposal: proposal, - State: api.MarketDealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - SlashEpoch: 2, - }, - } - type testCase struct { - currentDealInfo pipeline.CurrentDealInfo - currentDealInfoErr error - currentDealInfoErr2 error - preCommitDiff *miner.PreCommitChanges - matchStates []matchState - dealStartEpochTimeout bool - expectedCBCallCount uint64 - expectedCBSectorNumber abi.SectorNumber - expectedCBIsActive bool - expectedCBError error - expectedError error - } - testCases := map[string]testCase{ - "normal sequence": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{ - SectorNumber: sectorNumber, - SealedCID: sealedCid, - DealIDs: []abi.DealID{dealID}, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBIsActive: false, - expectedCBSectorNumber: sectorNumber, - }, - "ignores unsuccessful pre-commit message": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{ - SectorNumber: sectorNumber, - SealedCID: sealedCid, - DealIDs: []abi.DealID{dealID}, - }), - // non-zero exit code indicates unsuccessful pre-commit message - receipt: &types.MessageReceipt{ExitCode: 1}, - }, - }, - expectedCBCallCount: 0, - }, - "deal already pre-committed": { - currentDealInfo: 
pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - preCommitDiff: &miner.PreCommitChanges{ - Added: []minertypes.SectorPreCommitOnChainInfo{{ - Info: minertypes.SectorPreCommitInfo{ - SectorNumber: sectorNumber, - DealIDs: []abi.DealID{dealID}, - }, - }}, - }, - expectedCBCallCount: 1, - expectedCBIsActive: false, - expectedCBSectorNumber: sectorNumber, - }, - "error getting current deal info in check func": { - currentDealInfoErr: errors.New("something went wrong"), - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: failed to check deal activity: failed to look up deal on chain: something went wrong"), - }, - "sector already active": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - expectedCBCallCount: 1, - expectedCBIsActive: true, - }, - "sector was slashed": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: slashedDeal, - PublishMsgTipSet: types.EmptyTSK, - }, - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: failed to check deal activity: deal %d was slashed at epoch %d", dealID, slashedDeal.State.SlashEpoch), - }, - "error getting current deal info in called func": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfoErr2: errors.New("something went wrong"), - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{ - SectorNumber: sectorNumber, - SealedCID: sealedCid, - DealIDs: []abi.DealID{dealID}, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBError: errors.New("handling applied event: failed to get dealinfo: something went wrong"), - }, - "proposed deal epoch timeout": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - dealStartEpochTimeout: true, - expectedCBCallCount: 1, 
- expectedCBError: xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID), - }, - } - runTestCase := func(testCase string, data testCase) { - t.Run(testCase, func(t *testing.T) { - checkTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages := make([]matchMessage, len(data.matchStates)) - for i, ms := range data.matchStates { - matchTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages[i] = matchMessage{ - curH: 5, - msg: ms.msg, - msgReceipt: ms.receipt, - ts: matchTs, - } - } - eventsAPI := &fakeEvents{ - Ctx: ctx, - CheckTs: checkTs, - MatchMessages: matchMessages, - DealStartEpochTimeout: data.dealStartEpochTimeout, - } - cbCallCount := uint64(0) - var cbSectorNumber abi.SectorNumber - var cbIsActive bool - var cbError error - cb := func(secNum abi.SectorNumber, isActive bool, err error) { - cbCallCount++ - cbSectorNumber = secNum - cbIsActive = isActive - cbError = err - } - - mockPCAPI := &mockPreCommitsAPI{ - PCChanges: data.preCommitDiff, - } - mockDIAPI := &mockDealInfoAPI{ - CurrentDealInfo: data.currentDealInfo, - CurrentDealInfo2: data.currentDealInfo, - Err: data.currentDealInfoErr, - Err2: data.currentDealInfoErr2, - } - scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) - //stm: @MARKET_ADAPTER_ON_SECTOR_PRE_COMMIT_001 - err = scm.OnDealSectorPreCommitted(ctx, provider, proposal, publishCid, cb) - if data.expectedError == nil { - require.NoError(t, err) - } else { - require.EqualError(t, err, data.expectedError.Error()) - } - require.Equal(t, data.expectedCBSectorNumber, cbSectorNumber) - require.Equal(t, data.expectedCBIsActive, cbIsActive) - require.Equal(t, data.expectedCBCallCount, cbCallCount) - if data.expectedCBError == nil { - require.NoError(t, cbError) - } else { - require.EqualError(t, cbError, data.expectedCBError.Error()) - } - }) - } - for testCase, 
data := range testCases { - runTestCase(testCase, data) - } -} - -func TestOnDealSectorCommitted(t *testing.T) { - label, err := markettypes.NewLabelFromString("success") - require.NoError(t, err) - - provider := address.TestAddress - publishCid := generateCids(1)[0] - pieceCid := generateCids(1)[0] - dealID := abi.DealID(rand.Uint64()) - sectorNumber := abi.SectorNumber(rand.Uint64()) - proposal := market.DealProposal{ - PieceCID: pieceCid, - PieceSize: abi.PaddedPieceSize(rand.Uint64()), - Client: tutils.NewActorAddr(t, "client"), - Provider: tutils.NewActorAddr(t, "provider"), - StoragePricePerEpoch: abi.NewTokenAmount(1), - ProviderCollateral: abi.NewTokenAmount(1), - ClientCollateral: abi.NewTokenAmount(1), - Label: label, - } - unfinishedDeal := &api.MarketDeal{ - Proposal: proposal, - State: api.MarketDealState{ - SectorStartEpoch: -1, - LastUpdatedEpoch: 2, - }, - } - activeDeal := &api.MarketDeal{ - Proposal: proposal, - State: api.MarketDealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - }, - } - slashedDeal := &api.MarketDeal{ - Proposal: proposal, - State: api.MarketDealState{ - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - SlashEpoch: 2, - }, - } - type testCase struct { - currentDealInfo pipeline.CurrentDealInfo - currentDealInfoErr error - currentDealInfo2 pipeline.CurrentDealInfo - currentDealInfoErr2 error - matchStates []matchState - dealStartEpochTimeout bool - expectedCBCallCount uint64 - expectedCBError error - expectedError error - } - testCases := map[string]testCase{ - "normal sequence": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfo2: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - }, - }, - expectedCBCallCount: 1, - }, - "ignores unsuccessful prove-commit 
message": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfo2: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - // Exit-code 1 means the prove-commit was unsuccessful - receipt: &types.MessageReceipt{ExitCode: 1}, - }, - }, - expectedCBCallCount: 0, - }, - "error getting current deal info in check func": { - currentDealInfoErr: errors.New("something went wrong"), - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: failed to look up deal on chain: something went wrong"), - }, - "sector already active": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: activeDeal, - }, - expectedCBCallCount: 1, - }, - "sector was slashed": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: slashedDeal, - }, - expectedCBCallCount: 0, - expectedError: xerrors.Errorf("failed to set up called handler: deal %d was slashed at epoch %d", dealID, slashedDeal.State.SlashEpoch), - }, - "error getting current deal info in called func": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfoErr2: errors.New("something went wrong"), - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBError: xerrors.Errorf("handling applied event: failed to look up deal on chain: something went wrong"), - }, - "proposed deal epoch timeout": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - dealStartEpochTimeout: true, - expectedCBCallCount: 1, - expectedCBError: 
xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID), - }, - "got prove-commit but deal not active": { - currentDealInfo: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - currentDealInfo2: pipeline.CurrentDealInfo{ - DealID: dealID, - MarketDeal: unfinishedDeal, - }, - matchStates: []matchState{ - { - msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{ - SectorNumber: sectorNumber, - }), - }, - }, - expectedCBCallCount: 1, - expectedCBError: xerrors.Errorf("handling applied event: deal wasn't active: deal=%d, parentState=bafkqaaa, h=5", dealID), - }, - } - runTestCase := func(testCase string, data testCase) { - t.Run(testCase, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - checkTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages := make([]matchMessage, len(data.matchStates)) - for i, ms := range data.matchStates { - matchTs, err := test.MockTipset(provider, rand.Uint64()) - require.NoError(t, err) - matchMessages[i] = matchMessage{ - curH: 5, - msg: ms.msg, - msgReceipt: ms.receipt, - ts: matchTs, - } - } - eventsAPI := &fakeEvents{ - Ctx: ctx, - CheckTs: checkTs, - MatchMessages: matchMessages, - DealStartEpochTimeout: data.dealStartEpochTimeout, - } - cbCallCount := uint64(0) - var cbError error - cb := func(err error) { - cbCallCount++ - cbError = err - } - mockPCAPI := &mockPreCommitsAPI{} - mockDIAPI := &mockDealInfoAPI{ - CurrentDealInfo: data.currentDealInfo, - CurrentDealInfo2: data.currentDealInfo2, - Err: data.currentDealInfoErr, - Err2: data.currentDealInfoErr2, - } - scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) - //stm: @MARKET_ADAPTER_ON_SECTOR_COMMIT_001 - err = scm.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, publishCid, cb) - 
if data.expectedError == nil { - require.NoError(t, err) - } else { - require.EqualError(t, err, data.expectedError.Error()) - } - require.Equal(t, data.expectedCBCallCount, cbCallCount) - if data.expectedCBError == nil { - require.NoError(t, cbError) - } else { - require.EqualError(t, cbError, data.expectedCBError.Error()) - } - }) - } - for testCase, data := range testCases { - runTestCase(testCase, data) - } -} - -type matchState struct { - msg *types.Message - receipt *types.MessageReceipt -} - -type matchMessage struct { - curH abi.ChainEpoch - msg *types.Message - msgReceipt *types.MessageReceipt - ts *types.TipSet - doesRevert bool -} -type fakeEvents struct { - Ctx context.Context - CheckTs *types.TipSet - MatchMessages []matchMessage - DealStartEpochTimeout bool -} - -func (fe *fakeEvents) Called(ctx context.Context, check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error { - if fe.DealStartEpochTimeout { - msgHnd(nil, nil, nil, 100) // nolint:errcheck - return nil - } - - _, more, err := check(ctx, fe.CheckTs) - if err != nil { - return err - } - if !more { - return nil - } - for _, matchMessage := range fe.MatchMessages { - matched, err := mf(matchMessage.msg) - if err != nil { - return err - } - if matched { - receipt := matchMessage.msgReceipt - if receipt == nil { - receipt = &types.MessageReceipt{ExitCode: 0} - } - more, err := msgHnd(matchMessage.msg, receipt, matchMessage.ts, matchMessage.curH) - if err != nil { - // error is handled through a callback rather than being returned - return nil - } - if matchMessage.doesRevert { - err := rev(ctx, matchMessage.ts) - if err != nil { - return err - } - } - if !more { - return nil - } - } - } - return nil -} - -func makeMessage(t *testing.T, to address.Address, method abi.MethodNum, params cbor.Marshaler) *types.Message { - buf := new(bytes.Buffer) - err := params.MarshalCBOR(buf) - require.NoError(t, err) - return 
&types.Message{ - To: to, - Method: method, - Params: buf.Bytes(), - } -} - -var seq int - -func generateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blocks.NewBlock([]byte(fmt.Sprint(seq))).Cid() - seq++ - cids = append(cids, c) - } - return cids -} - -type mockPreCommitsAPI struct { - PCChanges *miner.PreCommitChanges - Err error -} - -func (m *mockPreCommitsAPI) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { - pcc := &miner.PreCommitChanges{} - if m.PCChanges != nil { - pcc = m.PCChanges - } - return pcc, m.Err -} - -type mockDealInfoAPI struct { - count int - CurrentDealInfo pipeline.CurrentDealInfo - Err error - CurrentDealInfo2 pipeline.CurrentDealInfo - Err2 error -} - -func (m *mockDealInfoAPI) GetCurrentDealInfo(ctx context.Context, tsk types.TipSetKey, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, error) { - m.count++ - if m.count == 2 { - return m.CurrentDealInfo2, m.Err2 - } - return m.CurrentDealInfo, m.Err -} diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go deleted file mode 100644 index 11742c879f6..00000000000 --- a/markets/storageadapter/provider.go +++ /dev/null @@ -1,441 +0,0 @@ -package storageadapter - -// this file implements storagemarket.StorageProviderNode - -import ( - "context" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/lotus/api" - 
"github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/markets/utils" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules/helpers" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/pipeline/piece" - "github.com/filecoin-project/lotus/storage/sectorblocks" -) - -var addPieceRetryWait = 5 * time.Minute -var addPieceRetryTimeout = 6 * time.Hour -var defaultMaxProviderCollateralMultiplier = uint64(2) -var log = logging.Logger("storageadapter") - -type ProviderNodeAdapter struct { - v1api.FullNode - - secb *sectorblocks.SectorBlocks - ev *events.Events - - dealPublisher *DealPublisher - - addBalanceSpec *api.MessageSendSpec - maxDealCollateralMultiplier uint64 - dsMatcher *dealStateMatcher - scMgr *SectorCommittedManager -} - -func NewProviderNodeAdapter(fc *config.MinerFeeConfig, dc *config.DealmakingConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) (storagemarket.StorageProviderNode, error) { - return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) (storagemarket.StorageProviderNode, error) { - ctx := helpers.LifecycleCtx(mctx, lc) - - ev, err := events.NewEvents(ctx, full) - if err != nil { - return nil, err - } - na := &ProviderNodeAdapter{ - FullNode: full, - - secb: secb, - ev: ev, - dealPublisher: dealPublisher, - dsMatcher: 
newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(full))), - } - if fc != nil { - na.addBalanceSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxMarketBalanceAddFee)} - } - na.maxDealCollateralMultiplier = defaultMaxProviderCollateralMultiplier - if dc != nil { - na.maxDealCollateralMultiplier = dc.MaxProviderCollateralMultiplier - } - na.scMgr = NewSectorCommittedManager(ev, na, &apiWrapper{api: full}) - - return na, nil - } -} - -func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { - return n.dealPublisher.Publish(ctx, deal.ClientDealProposal) -} - -func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData shared.ReadSeekStarter) (*storagemarket.PackingResult, error) { - if deal.PublishCid == nil { - return nil, xerrors.Errorf("deal.PublishCid can't be nil") - } - - sdInfo := piece.PieceDealInfo{ - DealID: deal.DealID, - DealProposal: &deal.Proposal, - PublishCid: deal.PublishCid, - DealSchedule: piece.DealSchedule{ - StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, - EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, - }, - KeepUnsealed: deal.FastRetrieval, - } - - // Attempt to add the piece to the sector - p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) - curTime := build.Clock.Now() - for build.Clock.Since(curTime) < addPieceRetryTimeout { - // Check if there was an error because of too many sectors being sealed - if !xerrors.Is(err, pipeline.ErrTooManySectorsSealing) { - if err != nil { - log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err) - } - - // There was either a fatal error or no error. 
In either case - // don't retry AddPiece - break - } - - // The piece could not be added to the sector because there are too - // many sectors being sealed, back-off for a while before trying again - select { - case <-build.Clock.After(addPieceRetryWait): - // Reset the reader to the start - err = pieceData.SeekStart() - if err != nil { - return nil, xerrors.Errorf("failed to reset piece reader to start before retrying AddPiece for deal %d: %w", deal.DealID, err) - } - - // Attempt to add the piece again - p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) - case <-ctx.Done(): - return nil, xerrors.New("context expired while waiting to retry AddPiece") - } - } - - if err != nil { - return nil, xerrors.Errorf("AddPiece failed: %s", err) - } - log.Warnf("New Deal: deal %d", deal.DealID) - - return &storagemarket.PackingResult{ - SectorNumber: p, - Offset: offset, - Size: pieceSize.Padded(), - }, nil -} - -func (n *ProviderNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte, encodedTs shared.TipSetToken) (bool, error) { - addr, err := n.StateAccountKey(ctx, addr, types.EmptyTSK) - if err != nil { - return false, err - } - - err = sigs.Verify(&sig, addr, input) - return err == nil, err -} - -func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (address.Address, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return address.Undef, err - } - - mi, err := n.StateMinerInfo(ctx, maddr, tsk) - if err != nil { - return address.Address{}, err - } - return mi.Worker, nil -} - -func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return 0, err - } - - mi, err := n.StateMinerInfo(ctx, maddr, tsk) - if err != nil { - return 0, err - } - - nver, err := 
n.StateNetworkVersion(ctx, tsk) - if err != nil { - return 0, err - } - - // false because this variance is not consumed. - const configWantSynthetic = false - - return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType, configWantSynthetic) -} - -func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { - signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK) - if err != nil { - return nil, err - } - - localSignature, err := n.WalletSign(ctx, signer, b) - if err != nil { - return nil, err - } - return localSignature, nil -} - -func (n *ProviderNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { - return n.MarketReserveFunds(ctx, wallet, addr, amt) -} - -func (n *ProviderNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { - return n.MarketReleaseFunds(ctx, addr, amt) -} - -// Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. -func (n *ProviderNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - // (Provider Node API) - smsg, err := n.MpoolPushMessage(ctx, &types.Message{ - To: market.Address, - From: addr, - Value: amount, - Method: market.Methods.AddBalance, - }, n.addBalanceSpec) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil -} - -func (n *ProviderNodeAdapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return storagemarket.Balance{}, err - } - - bal, err := n.StateMarketBalance(ctx, addr, tsk) - if err != nil { - return storagemarket.Balance{}, err - } - - return utils.ToSharedBalance(bal), nil -} - -// TODO: why doesnt this method take in a sector ID? 
-func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, encodedTs shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) { - refs, err := n.secb.GetRefs(ctx, dealID) - if err != nil { - return 0, 0, 0, err - } - if len(refs) == 0 { - return 0, 0, 0, xerrors.New("no sector information for deal ID") - } - - // TODO: better strategy (e.g. look for already unsealed) - var best api.SealedRef - var bestSi api.SectorInfo - for _, r := range refs { - si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false) - if err != nil { - return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err) - } - if si.State == api.SectorState(pipeline.Proving) { - best = r - bestSi = si - break - } - } - if bestSi.State == api.SectorState(pipeline.UndefinedSectorState) { - return 0, 0, 0, xerrors.New("no sealed sector found") - } - return best.SectorID, best.Offset, best.Size.Padded(), nil -} - -func (n *ProviderNodeAdapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) { - bounds, err := n.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK) - if err != nil { - return abi.TokenAmount{}, abi.TokenAmount{}, err - } - - // The maximum amount of collateral that the provider will put into escrow - // for a deal is calculated as a multiple of the minimum bounded amount - max := types.BigMul(bounds.Min, types.NewInt(n.maxDealCollateralMultiplier)) - - return bounds.Min, max, nil -} - -// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) -func (n *ProviderNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { - return n.scMgr.OnDealSectorPreCommitted(ctx, provider, proposal, 
*publishCid, cb) -} - -// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) -func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { - return n.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, *publishCid, cb) -} - -func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - head, err := n.ChainHead(ctx) - if err != nil { - return nil, 0, err - } - - return head.Key().Bytes(), head.Height(), nil -} - -func (n *ProviderNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error { - receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return cb(0, nil, cid.Undef, err) - } - return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil) -} - -func (n *ProviderNodeAdapter) WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal markettypes.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { - // Wait for deal to be published (plus additional time for confidence) - receipt, err := n.StateWaitMsg(ctx, publishCid, 2*build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return nil, xerrors.Errorf("WaitForPublishDeals errored: %w", err) - } - if receipt.Receipt.ExitCode != exitcode.Ok { - return nil, xerrors.Errorf("WaitForPublishDeals exit code: %s", receipt.Receipt.ExitCode) - } - - // The deal ID may have changed since publish if there was a reorg, so - // get the current deal ID - head, err := n.ChainHead(ctx) - if err != nil { - return nil, xerrors.Errorf("WaitForPublishDeals failed to get chain head: %w", err) - } - - res, err := 
n.scMgr.dealInfo.GetCurrentDealInfo(ctx, head.Key(), &proposal, publishCid) - if err != nil { - return nil, xerrors.Errorf("WaitForPublishDeals getting deal info errored: %w", err) - } - - return &storagemarket.PublishDealsWaitResult{DealID: res.DealID, FinalCid: receipt.Message}, nil -} - -func (n *ProviderNodeAdapter) GetDataCap(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*abi.StoragePower, error) { - tsk, err := types.TipSetKeyFromBytes(encodedTs) - if err != nil { - return nil, err - } - - sp, err := n.StateVerifiedClientStatus(ctx, addr, tsk) - return sp, err -} - -func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { - head, err := n.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("client: failed to get chain head: %w", err) - } - - sd, err := n.StateMarketStorageDeal(ctx, dealID, head.Key()) - if err != nil { - return xerrors.Errorf("client: failed to look up deal %d on chain: %w", dealID, err) - } - - // Called immediately to check if the deal has already expired or been slashed - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - if ts == nil { - // keep listening for events - return false, true, nil - } - - // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts.Height() { - onDealExpired(nil) - return true, false, nil - } - - // If there is no deal assume it's already been slashed - if sd.State.SectorStartEpoch < 0 { - onDealSlashed(ts.Height(), nil) - return true, false, nil - } - - // No events have occurred yet, so return - // done: false, more: true (keep listening for events) - return false, true, nil - } - - // Called when there was a match against the state change we're looking for - // and the chain has advanced to the confidence height - stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states 
events.StateChange, h abi.ChainEpoch) (more bool, err error) { - // Check if the deal has already expired - if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { - onDealExpired(nil) - return false, nil - } - - // Timeout waiting for state change - if states == nil { - log.Error("timed out waiting for deal expiry") - return false, nil - } - - changedDeals, ok := states.(state.ChangedDeals) - if !ok { - panic("Expected state.ChangedDeals") - } - - deal, ok := changedDeals[dealID] - if !ok { - // No change to deal - return true, nil - } - - // Deal was slashed - if deal.To == nil { - onDealSlashed(ts2.Height(), nil) - return false, nil - } - - return true, nil - } - - // Called when there was a chain reorg and the state change was reverted - revert := func(ctx context.Context, ts *types.TipSet) error { - // TODO: Is it ok to just ignore this? - log.Warn("deal state reverted; TODO: actually handle this!") - return nil - } - - // Watch for state changes to the deal - match := n.dsMatcher.matcher(ctx, dealID) - - // Wait until after the end epoch for the deal and then timeout - timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 - if err := n.ev.StateChanged(checkFunc, stateChanged, revert, int(build.MessageConfidence)+1, timeout, match); err != nil { - return xerrors.Errorf("failed to set up state changed handler: %w", err) - } - - return nil -} - -var _ storagemarket.StorageProviderNode = &ProviderNodeAdapter{} diff --git a/markets/utils/converters.go b/markets/utils/converters.go deleted file mode 100644 index 9562de695fc..00000000000 --- a/markets/utils/converters.go +++ /dev/null @@ -1,39 +0,0 @@ -package utils - -import ( - "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" -) - 
-func NewStorageProviderInfo(address address.Address, miner address.Address, sectorSize abi.SectorSize, peer peer.ID, addrs []abi.Multiaddrs) storagemarket.StorageProviderInfo { - multiaddrs := make([]multiaddr.Multiaddr, 0, len(addrs)) - for _, a := range addrs { - maddr, err := multiaddr.NewMultiaddrBytes(a) - if err != nil { - return storagemarket.StorageProviderInfo{} - } - multiaddrs = append(multiaddrs, maddr) - } - - return storagemarket.StorageProviderInfo{ - Address: address, - Worker: miner, - SectorSize: uint64(sectorSize), - PeerID: peer, - Addrs: multiaddrs, - } -} - -func ToSharedBalance(bal api.MarketBalance) storagemarket.Balance { - return storagemarket.Balance{ - Locked: bal.Locked, - Available: big.Sub(bal.Escrow, bal.Locked), - } -} diff --git a/markets/utils/selectors.go b/markets/utils/selectors.go deleted file mode 100644 index 1b8a62401dd..00000000000 --- a/markets/utils/selectors.go +++ /dev/null @@ -1,98 +0,0 @@ -package utils - -import ( - "bytes" - "context" - "fmt" - "io" - - // must be imported to init() raw-codec support - _ "github.com/ipld/go-ipld-prime/codec/raw" - - "github.com/ipfs/go-cid" - mdagipld "github.com/ipfs/go-ipld-format" - "github.com/ipfs/go-unixfsnode" - dagpb "github.com/ipld/go-codec-dagpb" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal" - "github.com/ipld/go-ipld-prime/traversal/selector" - selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" -) - -func TraverseDag( - ctx context.Context, - ds mdagipld.DAGService, - startFrom cid.Cid, - optionalSelector ipld.Node, - onOpen func(node mdagipld.Node) error, - visitCallback traversal.AdvVisitFn, -) error { - - if optionalSelector == nil { - optionalSelector = selectorparse.CommonSelector_MatchAllRecursively - } - - parsedSelector, err := selector.ParseSelector(optionalSelector) - if err != nil { - return err - } 
- - // not sure what this is for TBH: we also provide ctx in &traversal.Config{} - linkContext := ipld.LinkContext{Ctx: ctx} - - // this is what allows us to understand dagpb - nodePrototypeChooser := dagpb.AddSupportToChooser( - func(ipld.Link, ipld.LinkContext) (ipld.NodePrototype, error) { - return basicnode.Prototype.Any, nil - }, - ) - - // this is how we implement GETs - linkSystem := cidlink.DefaultLinkSystem() - linkSystem.StorageReadOpener = func(lctx ipld.LinkContext, lnk ipld.Link) (io.Reader, error) { - cl, isCid := lnk.(cidlink.Link) - if !isCid { - return nil, fmt.Errorf("unexpected link type %#v", lnk) - } - - node, err := ds.Get(lctx.Ctx, cl.Cid) - if err != nil { - return nil, err - } - - if onOpen != nil { - if err := onOpen(node); err != nil { - return nil, err - } - } - - return bytes.NewBuffer(node.RawData()), nil - } - unixfsnode.AddUnixFSReificationToLinkSystem(&linkSystem) - - // this is how we pull the start node out of the DS - startLink := cidlink.Link{Cid: startFrom} - startNodePrototype, err := nodePrototypeChooser(startLink, linkContext) - if err != nil { - return err - } - startNode, err := linkSystem.Load( - linkContext, - startLink, - startNodePrototype, - ) - if err != nil { - return err - } - - // this is the actual execution, invoking the supplied callback - return traversal.Progress{ - Cfg: &traversal.Config{ - Ctx: ctx, - LinkSystem: linkSystem, - LinkTargetNodePrototypeChooser: nodePrototypeChooser, - }, - }.WalkAdv(startNode, parsedSelector, visitCallback) -} diff --git a/metrics/metrics.go b/metrics/metrics.go index 85b9d82ec20..c47642dc4a2 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -74,22 +74,6 @@ var ( PeerCount = stats.Int64("peer/count", "Current number of FIL peers", stats.UnitDimensionless) APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) - // graphsync - - GraphsyncReceivingPeersCount = stats.Int64("graphsync/receiving_peers", "number of 
peers we are receiving graphsync data from", stats.UnitDimensionless) - GraphsyncReceivingActiveCount = stats.Int64("graphsync/receiving_active", "number of active receiving graphsync transfers", stats.UnitDimensionless) - GraphsyncReceivingCountCount = stats.Int64("graphsync/receiving_pending", "number of pending receiving graphsync transfers", stats.UnitDimensionless) - GraphsyncReceivingTotalMemoryAllocated = stats.Int64("graphsync/receiving_total_allocated", "amount of block memory allocated for receiving graphsync data", stats.UnitBytes) - GraphsyncReceivingTotalPendingAllocations = stats.Int64("graphsync/receiving_pending_allocations", "amount of block memory on hold being received pending allocation", stats.UnitBytes) - GraphsyncReceivingPeersPending = stats.Int64("graphsync/receiving_peers_pending", "number of peers we can't receive more data from cause of pending allocations", stats.UnitDimensionless) - - GraphsyncSendingPeersCount = stats.Int64("graphsync/sending_peers", "number of peers we are sending graphsync data to", stats.UnitDimensionless) - GraphsyncSendingActiveCount = stats.Int64("graphsync/sending_active", "number of active sending graphsync transfers", stats.UnitDimensionless) - GraphsyncSendingCountCount = stats.Int64("graphsync/sending_pending", "number of pending sending graphsync transfers", stats.UnitDimensionless) - GraphsyncSendingTotalMemoryAllocated = stats.Int64("graphsync/sending_total_allocated", "amount of block memory allocated for sending graphsync data", stats.UnitBytes) - GraphsyncSendingTotalPendingAllocations = stats.Int64("graphsync/sending_pending_allocations", "amount of block memory on hold from sending pending allocation", stats.UnitBytes) - GraphsyncSendingPeersPending = stats.Int64("graphsync/sending_peers_pending", "number of peers we can't send more data to cause of pending allocations", stats.UnitDimensionless) - // chain ChainNodeHeight = stats.Int64("chain/node_height", "Current Height of the node", 
stats.UnitDimensionless) ChainNodeHeightExpected = stats.Int64("chain/node_height_expected", "Expected Height of the node", stats.UnitDimensionless) @@ -580,56 +564,6 @@ var ( Aggregation: view.Sum(), } - // graphsync - GraphsyncReceivingPeersCountView = &view.View{ - Measure: GraphsyncReceivingPeersCount, - Aggregation: view.LastValue(), - } - GraphsyncReceivingActiveCountView = &view.View{ - Measure: GraphsyncReceivingActiveCount, - Aggregation: view.LastValue(), - } - GraphsyncReceivingCountCountView = &view.View{ - Measure: GraphsyncReceivingCountCount, - Aggregation: view.LastValue(), - } - GraphsyncReceivingTotalMemoryAllocatedView = &view.View{ - Measure: GraphsyncReceivingTotalMemoryAllocated, - Aggregation: view.LastValue(), - } - GraphsyncReceivingTotalPendingAllocationsView = &view.View{ - Measure: GraphsyncReceivingTotalPendingAllocations, - Aggregation: view.LastValue(), - } - GraphsyncReceivingPeersPendingView = &view.View{ - Measure: GraphsyncReceivingPeersPending, - Aggregation: view.LastValue(), - } - GraphsyncSendingPeersCountView = &view.View{ - Measure: GraphsyncSendingPeersCount, - Aggregation: view.LastValue(), - } - GraphsyncSendingActiveCountView = &view.View{ - Measure: GraphsyncSendingActiveCount, - Aggregation: view.LastValue(), - } - GraphsyncSendingCountCountView = &view.View{ - Measure: GraphsyncSendingCountCount, - Aggregation: view.LastValue(), - } - GraphsyncSendingTotalMemoryAllocatedView = &view.View{ - Measure: GraphsyncSendingTotalMemoryAllocated, - Aggregation: view.LastValue(), - } - GraphsyncSendingTotalPendingAllocationsView = &view.View{ - Measure: GraphsyncSendingTotalPendingAllocations, - Aggregation: view.LastValue(), - } - GraphsyncSendingPeersPendingView = &view.View{ - Measure: GraphsyncSendingPeersPending, - Aggregation: view.LastValue(), - } - // rcmgr RcmgrAllowConnView = &view.View{ Measure: RcmgrAllowConn, @@ -710,19 +644,6 @@ var views = []*view.View{ PeerCountView, APIRequestDurationView, - 
GraphsyncReceivingPeersCountView, - GraphsyncReceivingActiveCountView, - GraphsyncReceivingCountCountView, - GraphsyncReceivingTotalMemoryAllocatedView, - GraphsyncReceivingTotalPendingAllocationsView, - GraphsyncReceivingPeersPendingView, - GraphsyncSendingPeersCountView, - GraphsyncSendingActiveCountView, - GraphsyncSendingCountCountView, - GraphsyncSendingTotalMemoryAllocatedView, - GraphsyncSendingTotalPendingAllocationsView, - GraphsyncSendingPeersPendingView, - RcmgrAllowConnView, RcmgrBlockConnView, RcmgrAllowStreamView, diff --git a/node/builder.go b/node/builder.go index 1cd4823d533..2ea9dcac55c 100644 --- a/node/builder.go +++ b/node/builder.go @@ -33,7 +33,6 @@ import ( _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/delegated" _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/impl/net" @@ -69,9 +68,7 @@ var ( AutoNATSvcKey = special{10} // Libp2p option BandwidthReporterKey = special{11} // Libp2p option ConnGaterKey = special{12} // Libp2p option - DAGStoreKey = special{13} // constructor returns multiple values ResourceManagerKey = special{14} // Libp2p option - UserAgentKey = special{15} // Libp2p option ) type invoke int @@ -91,7 +88,6 @@ const ( CheckFDLimit CheckFvmConcurrency CheckUDPBufferSize - LegacyMarketsEOL // libp2p PstoreAddSelfKeysKey @@ -103,12 +99,10 @@ const ( RunHelloKey RunChainExchangeKey - RunChainGraphsync RunPeerMgrKey HandleIncomingBlocksKey HandleIncomingMessagesKey - HandleMigrateClientFundsKey HandlePaymentChannelManagerKey RelayIndexerMessagesKey @@ -266,12 +260,13 @@ func Base() Option { } // Config sets up constructors based on the provided Config -func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option { +func ConfigCommon(cfg *config.Common, buildVersion 
build.BuildVersion, enableLibp2pNode bool) Option { // setup logging early lotuslog.SetLevelsFromConfig(cfg.Logging.SubsystemLevels) return Options( func(s *Settings) error { s.Config = true; return nil }, + Override(new(build.BuildVersion), buildVersion), Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) { return multiaddr.NewMultiaddr(cfg.API.ListenAddress) }), @@ -396,7 +391,6 @@ func Test() Option { Unset(RunPeerMgrKey), Unset(new(*peermgr.PeerMgr)), Override(new(beacon.Schedule), testing.RandomBeacon), - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), Override(new(index.MsgIndex), modules.DummyMsgIndex), ) } diff --git a/node/builder_chain.go b/node/builder_chain.go index 0b40e4530c4..b273a168cc1 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -6,12 +6,8 @@ import ( "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/go-fil-markets/discovery" - discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/consensus" @@ -32,8 +28,6 @@ import ( ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" "github.com/filecoin-project/lotus/chain/wallet/remotewallet" "github.com/filecoin-project/lotus/lib/peermgr" - "github.com/filecoin-project/lotus/markets/retrievaladapter" - "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/hello" "github.com/filecoin-project/lotus/node/impl" @@ -104,9 +98,6 @@ var ChainNode = Options( Override(new(*messagepool.MessagePool), modules.MessagePool), 
Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)), - // Shared graphsync (markets, serving chain) - Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultFullNode().Client.SimultaneousTransfersForStorage, config.DefaultFullNode().Client.SimultaneousTransfersForRetrieval)), - // Service: Wallet Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSigner) *messagesigner.MessageSigner { return ms }), @@ -121,23 +112,8 @@ var ChainNode = Options( Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager), Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), - // Markets (common) - Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery), - - // Markets (retrieval) - Override(new(discovery.PeerResolver), modules.RetrievalResolver), - Override(new(retrievalmarket.BlockstoreAccessor), modules.RetrievalBlockstoreAccessor), - Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient(false)), - Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer), - // Markets (storage) Override(new(*market.FundManager), market.NewFundManager), - Override(new(dtypes.ClientDatastore), modules.NewClientDatastore), - Override(new(storagemarket.BlockstoreAccessor), modules.StorageBlockstoreAccessor), - Override(new(*retrievaladapter.APIBlockstoreAccessor), retrievaladapter.NewAPIBlockstoreAdapter), - Override(new(storagemarket.StorageClient), modules.StorageClient), - Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), - Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds), Override(new(*full.GasPriceCache), full.NewGasPriceCache), @@ -184,7 +160,7 @@ func ConfigFullNode(c interface{}) Option { enableLibp2pNode := true // always enable libp2p for full nodes return Options( - ConfigCommon(&cfg.Common, enableLibp2pNode), + ConfigCommon(&cfg.Common, 
build.NodeUserVersion(), enableLibp2pNode), Override(new(dtypes.UniversalBlockstore), modules.UniversalBlockstore), @@ -224,14 +200,6 @@ func ConfigFullNode(c interface{}) Option { // as it enables us to serve logs in eth_getTransactionReceipt. If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)), - Override(new(dtypes.ClientImportMgr), modules.ClientImportMgr), - - Override(new(dtypes.ClientBlockstore), modules.ClientBlockstore), - - Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfersForStorage, cfg.Client.SimultaneousTransfersForRetrieval)), - - Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient(cfg.Client.OffChainRetrieval)), - If(cfg.Wallet.RemoteBackend != "", Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), ), diff --git a/node/builder_miner.go b/node/builder_miner.go index 08c71ba1976..fddec7785cc 100644 --- a/node/builder_miner.go +++ b/node/builder_miner.go @@ -2,16 +2,10 @@ package node import ( "errors" - "time" - provider "github.com/ipni/index-provider" "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" @@ -20,12 +14,6 @@ import ( "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/markets/dagstore" - "github.com/filecoin-project/lotus/markets/dealfilter" - "github.com/filecoin-project/lotus/markets/idxprov" - "github.com/filecoin-project/lotus/markets/retrievaladapter" - 
"github.com/filecoin-project/lotus/markets/sectoraccessor" - "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl" @@ -62,21 +50,6 @@ func ConfigStorageMiner(c interface{}) Option { return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) } - pricingConfig := cfg.Dealmaking.RetrievalPricing - if pricingConfig.Strategy == config.RetrievalPricingExternalMode { - if pricingConfig.External == nil { - return Error(xerrors.New("retrieval pricing policy has been to set to external but external policy config is nil")) - } - - if pricingConfig.External.Path == "" { - return Error(xerrors.New("retrieval pricing policy has been to set to external but external script path is empty")) - } - } else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode { - return Error(xerrors.New("retrieval pricing policy must be either default or external")) - } - - enableLibp2pNode := cfg.Subsystems.EnableMarkets // we enable libp2p nodes if the storage market subsystem is enabled, otherwise we don't - return Options( Override(new(v1api.FullNode), modules.MakeUuidWrapper), @@ -84,7 +57,7 @@ func ConfigStorageMiner(c interface{}) Option { Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig), Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), - ConfigCommon(&cfg.Common, enableLibp2pNode), + ConfigCommon(&cfg.Common, build.NodeUserVersion(), false), Override(CheckFDLimit, modules.CheckFdLimit(build.MinerFDLimit)), // recommend at least 100k FD limit to miners @@ -93,7 +66,6 @@ func ConfigStorageMiner(c interface{}) Option { Override(new(*paths.Local), modules.LocalStorage), Override(new(*paths.Remote), modules.RemoteStorage), Override(new(paths.Store), From(new(*paths.Remote))), - Override(new(dtypes.RetrievalPricingFunc), 
modules.RetrievalPricingFunc(cfg.Dealmaking)), If(cfg.Subsystems.EnableMining || cfg.Subsystems.EnableSealing, Override(GetParamsKey, modules.GetParams(!cfg.Proving.DisableBuiltinWindowPoSt || !cfg.Proving.DisableBuiltinWinningPoSt || cfg.Storage.AllowCommit || cfg.Storage.AllowProveReplicaUpdate2)), @@ -164,88 +136,6 @@ func ConfigStorageMiner(c interface{}) Option { Override(new(paths.SectorIndex), From(new(modules.MinerSealingService))), ), - If(cfg.Subsystems.EnableMarkets, - - // Alert that legacy-markets is being deprecated - Override(LegacyMarketsEOL, modules.LegacyMarketsEOL), - - // Markets - Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), - Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, cfg.Dealmaking.SimultaneousTransfersForStoragePerClient, cfg.Dealmaking.SimultaneousTransfersForRetrieval)), - Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), - Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), - - // Markets (retrieval deps) - Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider), - Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{ - RetrievalPricing: &config.RetrievalPricing{ - Strategy: config.RetrievalPricingDefaultMode, - Default: &config.RetrievalPricingDefault{}, - }, - })), - Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), - - // DAG Store - Override(new(dagstore.MinerAPI), modules.NewMinerAPI(cfg.DAGStore)), - Override(DAGStoreKey, modules.DAGStore(cfg.DAGStore)), - - // Markets (retrieval) - Override(new(dagstore.SectorAccessor), sectoraccessor.NewSectorAccessor), - Override(new(retrievalmarket.SectorAccessor), From(new(dagstore.SectorAccessor))), - Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), - Override(new(rmnet.RetrievalMarketNetwork), 
modules.RetrievalNetwork), - Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), - Override(HandleRetrievalKey, modules.HandleRetrieval), - - // Markets (storage) - Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), - Override(new(dtypes.ProviderTransport), modules.NewProviderTransport), - Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDataTransfer), - Override(new(idxprov.MeshCreator), idxprov.NewMeshCreator), - Override(new(provider.Interface), modules.IndexProvider(cfg.IndexProvider)), - Override(new(*storedask.StoredAsk), modules.NewStorageAsk), - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(cfg.Dealmaking, nil)), - Override(new(storagemarket.StorageProvider), modules.StorageProvider), - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), - Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), - Override(HandleDealsKey, modules.HandleDeals), - - // Config (todo: get a real property system) - Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), - Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), - Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc), - 
Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc), - Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc), - Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc), - Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc), - Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc), - Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc), - Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc), - - If(cfg.Dealmaking.Filter != "", - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(cfg.Dealmaking, dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), - ), - - If(cfg.Dealmaking.RetrievalFilter != "", - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), - ), - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{ - Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod), - MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg, - StartEpochSealingBuffer: cfg.Dealmaking.StartEpochSealingBuffer, - })), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)), - ), - 
Override(new(config.SealerConfig), cfg.Storage), Override(new(config.ProvingConfig), cfg.Proving), Override(new(config.HarmonyDB), cfg.HarmonyDB), @@ -254,7 +144,7 @@ func ConfigStorageMiner(c interface{}) Option { ) } -func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConfig) Option { +func StorageMiner(out *api.StorageMiner) Option { return Options( ApplyIf(func(s *Settings) bool { return s.Config }, Error(errors.New("the StorageMiner option must be set before Config option")), @@ -262,7 +152,6 @@ func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConf func(s *Settings) error { s.nodeType = repo.StorageMiner - s.enableLibp2pNode = subsystemsCfg.EnableMarkets return nil }, diff --git a/node/config/def.go b/node/config/def.go index 2dd4b77eb2f..d8b8e0babb3 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -2,12 +2,8 @@ package config import ( "encoding" - "os" - "strconv" "time" - "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" @@ -18,24 +14,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -const ( - // RetrievalPricingDefault configures the node to use the default retrieval pricing policy. - RetrievalPricingDefaultMode = "default" - // RetrievalPricingExternal configures the node to use the external retrieval pricing script - // configured by the user. - RetrievalPricingExternalMode = "external" -) - -// MaxTraversalLinks configures the maximum number of links to traverse in a DAG while calculating -// CommP and traversing a DAG with graphsync; invokes a budget on DAG depth and density. 
-var MaxTraversalLinks uint64 = 32 * (1 << 20) - -func init() { - if envMaxTraversal, err := strconv.ParseUint(os.Getenv("LOTUS_MAX_TRAVERSAL_LINKS"), 10, 64); err == nil { - MaxTraversalLinks = envMaxTraversal - } -} - func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount { return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector))) } @@ -77,8 +55,6 @@ func defCommon() Common { } } -var DefaultSimultaneousTransfers = uint64(20) - func DefaultDefaultMaxFee() types.FIL { return types.MustParseFIL("0.07") } @@ -90,10 +66,7 @@ func DefaultFullNode() *FullNode { Fees: FeeConfig{ DefaultMaxFee: DefaultDefaultMaxFee(), }, - Client: Client{ - SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, - SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, - }, + Chainstore: Chainstore{ EnableSplitstore: true, Splitstore: Splitstore{ @@ -193,52 +166,13 @@ func DefaultStorageMiner() *StorageMiner { }, Dealmaking: DealmakingConfig{ - ConsiderOnlineStorageDeals: true, - ConsiderOfflineStorageDeals: true, - ConsiderOnlineRetrievalDeals: true, - ConsiderOfflineRetrievalDeals: true, - ConsiderVerifiedStorageDeals: true, - ConsiderUnverifiedStorageDeals: true, - PieceCidBlocklist: []cid.Cid{}, - // TODO: It'd be nice to set this based on sector size - MaxDealStartDelay: Duration(time.Hour * 24 * 14), - ExpectedSealDuration: Duration(time.Hour * 24), - PublishMsgPeriod: Duration(time.Hour), - MaxDealsPerPublishMsg: 8, - MaxProviderCollateralMultiplier: 2, - - SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, - SimultaneousTransfersForStoragePerClient: 0, - SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, - StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed - - RetrievalPricing: &RetrievalPricing{ - Strategy: RetrievalPricingDefaultMode, - Default: &RetrievalPricingDefault{ - VerifiedDealsFreeTransfer: true, - }, - External: 
&RetrievalPricingExternal{ - Path: "", - }, - }, - }, - - IndexProvider: IndexProviderConfig{ - Enable: true, - EntriesCacheCapacity: 1024, - EntriesChunkSize: 16384, - // The default empty TopicName means it is inferred from network name, in the following - // format: "/indexer/ingest/" - TopicName: "", - PurgeCacheOnStart: false, }, Subsystems: MinerSubsystemConfig{ EnableMining: true, EnableSealing: true, EnableSectorStorage: true, - EnableMarkets: false, EnableSectorIndexDB: false, }, @@ -270,12 +204,6 @@ func DefaultStorageMiner() *StorageMiner { DealPublishControl: []string{}, }, - DAGStore: DAGStoreConfig{ - MaxConcurrentIndex: 5, - MaxConcurrencyStorageCalls: 100, - MaxConcurrentUnseals: 5, - GCInterval: Duration(1 * time.Minute), - }, HarmonyDB: HarmonyDB{ Hosts: []string{"127.0.0.1"}, Username: "yugabyte", @@ -327,46 +255,3 @@ const ( // worker. The scheduler may assign any task to this worker. ResourceFilteringDisabled = ResourceFilteringStrategy("disabled") ) - -func DefaultCurioConfig() *CurioConfig { - return &CurioConfig{ - Subsystems: CurioSubsystemsConfig{ - GuiAddress: ":4701", - BoostAdapters: []string{}, - }, - Fees: CurioFees{ - DefaultMaxFee: DefaultDefaultMaxFee(), - MaxPreCommitGasFee: types.MustParseFIL("0.025"), - MaxCommitGasFee: types.MustParseFIL("0.05"), - - MaxPreCommitBatchGasFee: BatchFeeConfig{ - Base: types.MustParseFIL("0"), - PerSector: types.MustParseFIL("0.02"), - }, - MaxCommitBatchGasFee: BatchFeeConfig{ - Base: types.MustParseFIL("0"), - PerSector: types.MustParseFIL("0.03"), // enough for 6 agg and 1nFIL base fee - }, - - MaxTerminateGasFee: types.MustParseFIL("0.5"), - MaxWindowPoStGasFee: types.MustParseFIL("5"), - MaxPublishDealsFee: types.MustParseFIL("0.05"), - }, - Addresses: []CurioAddresses{{ - PreCommitControl: []string{}, - CommitControl: []string{}, - TerminateControl: []string{}, - MinerAddresses: []string{}, - }}, - Proving: CurioProvingConfig{ - ParallelCheckLimit: 32, - PartitionCheckTimeout: Duration(20 * 
time.Minute), - SingleCheckTimeout: Duration(10 * time.Minute), - }, - Ingest: CurioIngestConfig{ - MaxQueueSDR: 8, // default to 8 sectors before sdr - MaxQueueTrees: 0, // default don't use this limit - MaxQueuePoRep: 0, // default don't use this limit - }, - } -} diff --git a/node/config/def_test.go b/node/config/def_test.go index 627b65a5631..2edcce2b59f 100644 --- a/node/config/def_test.go +++ b/node/config/def_test.go @@ -79,9 +79,3 @@ func TestDefaultMinerRoundtrip(t *testing.T) { fmt.Println(c2) require.True(t, reflect.DeepEqual(c, c2)) } - -func TestDefaultStorageMiner_IsEmpty(t *testing.T) { - subject := DefaultStorageMiner() - require.True(t, subject.IndexProvider.Enable) - require.Equal(t, "", subject.IndexProvider.TopicName) -} diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 74549163e71..3f66344c809 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -85,30 +85,6 @@ your node if metadata log is disabled`, Comment: ``, }, }, - "Client": { - { - Name: "SimultaneousTransfersForStorage", - Type: "uint64", - - Comment: `The maximum number of simultaneous data transfers between the client -and storage providers for storage deals`, - }, - { - Name: "SimultaneousTransfersForRetrieval", - Type: "uint64", - - Comment: `The maximum number of simultaneous data transfers between the client -and storage providers for retrieval deals`, - }, - { - Name: "OffChainRetrieval", - Type: "bool", - - Comment: `Require that retrievals perform no on-chain operations. 
Paid retrievals -without existing payment channels with available funds will fail instead -of automatically performing on-chain operations.`, - }, - }, "Common": { { Name: "API", @@ -183,6 +159,30 @@ over the worker address if this flag is set.`, Comment: `MinerAddresses are the addresses of the miner actors to use for sending messages`, }, }, + "CurioAlerting": { + { + Name: "PagerDutyEventURL", + Type: "string", + + Comment: `PagerDutyEventURL is URL for PagerDuty.com Events API v2 URL. Events sent to this API URL are ultimately +routed to a PagerDuty.com service and processed. +The default is sufficient for integration with the stock commercial PagerDuty.com company's service.`, + }, + { + Name: "PageDutyIntegrationKey", + Type: "string", + + Comment: `PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service +identifier in the integration page for the service.`, + }, + { + Name: "MinimumWalletBalance", + Type: "types.FIL", + + Comment: `MinimumWalletBalance is the minimum balance for all active wallets. If the balance is below this value, an +alert will be triggered for the wallet`, + }, + }, "CurioConfig": { { Name: "Subsystems", @@ -224,6 +224,12 @@ over the worker address if this flag is set.`, Name: "Apis", Type: "ApisConfig", + Comment: ``, + }, + { + Name: "Alerting", + Type: "CurioAlerting", + Comment: ``, }, }, @@ -584,8 +590,8 @@ uses all available network (or disk) bandwidth on the machine without causing bo Comment: `BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. -Strings should be in the format "actor:port" or "actor:ip:port". Default listen address is 0.0.0.0 -Example: "f0123:32100", "f0123:127.0.0.1:32100". Multiple addresses can be specified. +Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. 
We recommend using a private IP. +Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified. When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one @@ -614,197 +620,13 @@ only need to be run on a single machine in the cluster.`, Comment: `The address that should listen for Web GUI requests.`, }, }, - "DAGStoreConfig": { - { - Name: "RootDir", - Type: "string", - - Comment: `Path to the dagstore root directory. This directory contains three -subdirectories, which can be symlinked to alternative locations if -need be: -- ./transients: caches unsealed deals that have been fetched from the -storage subsystem for serving retrievals. -- ./indices: stores shard indices. -- ./datastore: holds the KV store tracking the state of every shard -known to the DAG store. -Default value: /dagstore (split deployment) or -/dagstore (monolith deployment)`, - }, - { - Name: "MaxConcurrentIndex", - Type: "int", - - Comment: `The maximum amount of indexing jobs that can run simultaneously. -0 means unlimited. -Default value: 5.`, - }, - { - Name: "MaxConcurrentReadyFetches", - Type: "int", - - Comment: `The maximum amount of unsealed deals that can be fetched simultaneously -from the storage subsystem. 0 means unlimited. -Default value: 0 (unlimited).`, - }, - { - Name: "MaxConcurrentUnseals", - Type: "int", - - Comment: `The maximum amount of unseals that can be processed simultaneously -from the storage subsystem. 0 means unlimited. -Default value: 0 (unlimited).`, - }, - { - Name: "MaxConcurrencyStorageCalls", - Type: "int", - - Comment: `The maximum number of simultaneous inflight API calls to the storage -subsystem. -Default value: 100.`, - }, - { - Name: "GCInterval", - Type: "Duration", - - Comment: `The time between calls to periodic dagstore GC, in time.Duration string -representation, e.g. 
1m, 5m, 1h. -Default value: 1 minute.`, - }, - }, "DealmakingConfig": { - { - Name: "ConsiderOnlineStorageDeals", - Type: "bool", - - Comment: `When enabled, the miner can accept online deals`, - }, - { - Name: "ConsiderOfflineStorageDeals", - Type: "bool", - - Comment: `When enabled, the miner can accept offline deals`, - }, - { - Name: "ConsiderOnlineRetrievalDeals", - Type: "bool", - - Comment: `When enabled, the miner can accept retrieval deals`, - }, - { - Name: "ConsiderOfflineRetrievalDeals", - Type: "bool", - - Comment: `When enabled, the miner can accept offline retrieval deals`, - }, - { - Name: "ConsiderVerifiedStorageDeals", - Type: "bool", - - Comment: `When enabled, the miner can accept verified deals`, - }, - { - Name: "ConsiderUnverifiedStorageDeals", - Type: "bool", - - Comment: `When enabled, the miner can accept unverified deals`, - }, - { - Name: "PieceCidBlocklist", - Type: "[]cid.Cid", - - Comment: `A list of Data CIDs to reject when making deals`, - }, - { - Name: "ExpectedSealDuration", - Type: "Duration", - - Comment: `Maximum expected amount of time getting the deal into a sealed sector will take -This includes the time the deal will need to get transferred and published -before being assigned to a sector`, - }, - { - Name: "MaxDealStartDelay", - Type: "Duration", - - Comment: `Maximum amount of time proposed deal StartEpoch can be in future`, - }, - { - Name: "PublishMsgPeriod", - Type: "Duration", - - Comment: `When a deal is ready to publish, the amount of time to wait for more -deals to be ready to publish before publishing them all as a batch`, - }, - { - Name: "MaxDealsPerPublishMsg", - Type: "uint64", - - Comment: `The maximum number of deals to include in a single PublishStorageDeals -message`, - }, - { - Name: "MaxProviderCollateralMultiplier", - Type: "uint64", - - Comment: `The maximum collateral that the provider will put up against a deal, -as a multiplier of the minimum collateral bound`, - }, - { - Name: 
"MaxStagingDealsBytes", - Type: "int64", - - Comment: `The maximum allowed disk usage size in bytes of staging deals not yet -passed to the sealing node by the markets service. 0 is unlimited.`, - }, - { - Name: "SimultaneousTransfersForStorage", - Type: "uint64", - - Comment: `The maximum number of parallel online data transfers for storage deals`, - }, - { - Name: "SimultaneousTransfersForStoragePerClient", - Type: "uint64", - - Comment: `The maximum number of simultaneous data transfers from any single client -for storage deals. -Unset by default (0), and values higher than SimultaneousTransfersForStorage -will have no effect; i.e. the total number of simultaneous data transfers -across all storage clients is bound by SimultaneousTransfersForStorage -regardless of this number.`, - }, - { - Name: "SimultaneousTransfersForRetrieval", - Type: "uint64", - - Comment: `The maximum number of parallel online data transfers for retrieval deals`, - }, { Name: "StartEpochSealingBuffer", Type: "uint64", Comment: `Minimum start epoch buffer to give time for sealing of sector with deal.`, }, - { - Name: "Filter", - Type: "string", - - Comment: `A command used for fine-grained evaluation of storage deals -see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`, - }, - { - Name: "RetrievalFilter", - Type: "string", - - Comment: `A command used for fine-grained evaluation of retrieval deals -see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`, - }, - { - Name: "RetrievalPricing", - Type: "*RetrievalPricing", - - Comment: ``, - }, }, "EventsConfig": { { @@ -927,12 +749,6 @@ Set to 0 to keep all mappings`, }, }, "FullNode": { - { - Name: "Client", - Type: "Client", - - Comment: ``, - }, { Name: "Wallet", Type: "Wallet", @@ -1018,51 +834,6 @@ in a cluster. 
Only 1 is required`, EnableMsgIndex enables indexing of messages on chain.`, }, }, - "IndexProviderConfig": { - { - Name: "Enable", - Type: "bool", - - Comment: `Enable set whether to enable indexing announcement to the network and expose endpoints that -allow indexer nodes to process announcements. Enabled by default.`, - }, - { - Name: "EntriesCacheCapacity", - Type: "int", - - Comment: `EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement -entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The -maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and -the length of multihashes being advertised. For example, advertising 128-bit long multihashes -with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to -256MiB when full.`, - }, - { - Name: "EntriesChunkSize", - Type: "int", - - Comment: `EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk. -Defaults to 16384 if not specified. Note that chunks are chained together for indexing -advertisements that include more multihashes than the configured EntriesChunkSize.`, - }, - { - Name: "TopicName", - Type: "string", - - Comment: `TopicName sets the topic name on which the changes to the advertised content are announced. -If not explicitly specified, the topic name is automatically inferred from the network name -in following format: '/indexer/ingest/' -Defaults to empty, which implies the topic name is inferred from network name.`, - }, - { - Name: "PurgeCacheOnStart", - Type: "bool", - - Comment: `PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine -starts. 
By default, the cache is rehydrated from previously cached entries stored in -datastore if any is present.`, - }, - }, "JournalConfig": { { Name: "DisabledEvents", @@ -1263,12 +1034,6 @@ over the worker address if this flag is set.`, Comment: ``, }, - { - Name: "EnableMarkets", - Type: "bool", - - Comment: ``, - }, { Name: "EnableSectorIndexDB", Type: "bool", @@ -1499,46 +1264,6 @@ This property is used only if ElasticSearchTracer propery is set.`, Comment: `Auth token that will be passed with logs to elasticsearch - used for weighted peers score.`, }, }, - "RetrievalPricing": { - { - Name: "Strategy", - Type: "string", - - Comment: ``, - }, - { - Name: "Default", - Type: "*RetrievalPricingDefault", - - Comment: ``, - }, - { - Name: "External", - Type: "*RetrievalPricingExternal", - - Comment: ``, - }, - }, - "RetrievalPricingDefault": { - { - Name: "VerifiedDealsFreeTransfer", - Type: "bool", - - Comment: `VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal -of a payloadCid that belongs to a verified storage deal. -This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". -default value is true`, - }, - }, - "RetrievalPricingExternal": { - { - Name: "Path", - Type: "string", - - Comment: `Path of the external script that will be run to price a retrieval deal. 
-This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".`, - }, - }, "SealerConfig": { { Name: "ParallelFetchLimit", @@ -1956,12 +1681,6 @@ HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`, Comment: ``, }, - { - Name: "IndexProvider", - Type: "IndexProviderConfig", - - Comment: ``, - }, { Name: "Proving", Type: "ProvingConfig", @@ -1992,12 +1711,6 @@ HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`, Comment: ``, }, - { - Name: "DAGStore", - Type: "DAGStoreConfig", - - Comment: ``, - }, { Name: "HarmonyDB", Type: "HarmonyDB", diff --git a/node/config/load.go b/node/config/load.go index 1d5a8745851..b8fe5dcbd0b 100644 --- a/node/config/load.go +++ b/node/config/load.go @@ -81,7 +81,7 @@ func FromReader(reader io.Reader, def interface{}, opts ...LoadCfgOpt) (interfac } for _, d := range movedFields { if md.IsDefined(d.Field...) { - fmt.Fprintf( + _, _ = fmt.Fprintf( warningOut, "WARNING: Use of deprecated configuration option '%s' will be removed in a future release, use '%s' instead\n", strings.Join(d.Field, "."), diff --git a/node/config/types.go b/node/config/types.go index c15df320fa9..c6c7aaef40c 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -1,8 +1,6 @@ package config import ( - "github.com/ipfs/go-cid" - "github.com/filecoin-project/lotus/chain/types" ) @@ -22,7 +20,6 @@ type Common struct { // FullNode is a full node config type FullNode struct { Common - Client Client Wallet Wallet Fees FeeConfig Chainstore Chainstore @@ -53,17 +50,14 @@ type Logging struct { type StorageMiner struct { Common - Subsystems MinerSubsystemConfig - Dealmaking DealmakingConfig - IndexProvider IndexProviderConfig - Proving ProvingConfig - Sealing SealingConfig - Storage SealerConfig - Fees MinerFeeConfig - Addresses MinerAddressConfig - DAGStore DAGStoreConfig - - HarmonyDB HarmonyDB + Subsystems MinerSubsystemConfig + Dealmaking DealmakingConfig + Proving ProvingConfig + Sealing SealingConfig + 
Storage SealerConfig + Fees MinerFeeConfig + Addresses MinerAddressConfig + HarmonyDB HarmonyDB } type CurioConfig struct { @@ -77,6 +71,7 @@ type CurioConfig struct { Ingest CurioIngestConfig Journal JournalConfig Apis ApisConfig + Alerting CurioAlerting } type ApisConfig struct { @@ -203,8 +198,8 @@ type CurioSubsystemsConfig struct { // BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. // This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. - // Strings should be in the format "actor:port" or "actor:ip:port". Default listen address is 0.0.0.0 - // Example: "f0123:32100", "f0123:127.0.0.1:32100". Multiple addresses can be specified. + // Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP. + // Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified. // // When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the // deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one @@ -228,50 +223,10 @@ type CurioSubsystemsConfig struct { GuiAddress string } -type DAGStoreConfig struct { - // Path to the dagstore root directory. This directory contains three - // subdirectories, which can be symlinked to alternative locations if - // need be: - // - ./transients: caches unsealed deals that have been fetched from the - // storage subsystem for serving retrievals. - // - ./indices: stores shard indices. - // - ./datastore: holds the KV store tracking the state of every shard - // known to the DAG store. - // Default value: /dagstore (split deployment) or - // /dagstore (monolith deployment) - RootDir string - - // The maximum amount of indexing jobs that can run simultaneously. - // 0 means unlimited. - // Default value: 5. 
- MaxConcurrentIndex int - - // The maximum amount of unsealed deals that can be fetched simultaneously - // from the storage subsystem. 0 means unlimited. - // Default value: 0 (unlimited). - MaxConcurrentReadyFetches int - - // The maximum amount of unseals that can be processed simultaneously - // from the storage subsystem. 0 means unlimited. - // Default value: 0 (unlimited). - MaxConcurrentUnseals int - - // The maximum number of simultaneous inflight API calls to the storage - // subsystem. - // Default value: 100. - MaxConcurrencyStorageCalls int - - // The time between calls to periodic dagstore GC, in time.Duration string - // representation, e.g. 1m, 5m, 1h. - // Default value: 1 minute. - GCInterval Duration -} - type MinerSubsystemConfig struct { EnableMining bool EnableSealing bool EnableSectorStorage bool - EnableMarkets bool // When enabled, the sector index will reside in an external database // as opposed to the local KV store in the miner process @@ -302,111 +257,8 @@ type MinerSubsystemConfig struct { } type DealmakingConfig struct { - // When enabled, the miner can accept online deals - ConsiderOnlineStorageDeals bool - // When enabled, the miner can accept offline deals - ConsiderOfflineStorageDeals bool - // When enabled, the miner can accept retrieval deals - ConsiderOnlineRetrievalDeals bool - // When enabled, the miner can accept offline retrieval deals - ConsiderOfflineRetrievalDeals bool - // When enabled, the miner can accept verified deals - ConsiderVerifiedStorageDeals bool - // When enabled, the miner can accept unverified deals - ConsiderUnverifiedStorageDeals bool - // A list of Data CIDs to reject when making deals - PieceCidBlocklist []cid.Cid - // Maximum expected amount of time getting the deal into a sealed sector will take - // This includes the time the deal will need to get transferred and published - // before being assigned to a sector - ExpectedSealDuration Duration - // Maximum amount of time proposed deal StartEpoch 
can be in future - MaxDealStartDelay Duration - // When a deal is ready to publish, the amount of time to wait for more - // deals to be ready to publish before publishing them all as a batch - PublishMsgPeriod Duration - // The maximum number of deals to include in a single PublishStorageDeals - // message - MaxDealsPerPublishMsg uint64 - // The maximum collateral that the provider will put up against a deal, - // as a multiplier of the minimum collateral bound - MaxProviderCollateralMultiplier uint64 - // The maximum allowed disk usage size in bytes of staging deals not yet - // passed to the sealing node by the markets service. 0 is unlimited. - MaxStagingDealsBytes int64 - // The maximum number of parallel online data transfers for storage deals - SimultaneousTransfersForStorage uint64 - // The maximum number of simultaneous data transfers from any single client - // for storage deals. - // Unset by default (0), and values higher than SimultaneousTransfersForStorage - // will have no effect; i.e. the total number of simultaneous data transfers - // across all storage clients is bound by SimultaneousTransfersForStorage - // regardless of this number. - SimultaneousTransfersForStoragePerClient uint64 - // The maximum number of parallel online data transfers for retrieval deals - SimultaneousTransfersForRetrieval uint64 // Minimum start epoch buffer to give time for sealing of sector with deal. 
StartEpochSealingBuffer uint64 - - // A command used for fine-grained evaluation of storage deals - // see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details - Filter string - // A command used for fine-grained evaluation of retrieval deals - // see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details - RetrievalFilter string - - RetrievalPricing *RetrievalPricing -} - -type IndexProviderConfig struct { - // Enable set whether to enable indexing announcement to the network and expose endpoints that - // allow indexer nodes to process announcements. Enabled by default. - Enable bool - - // EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement - // entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The - // maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and - // the length of multihashes being advertised. For example, advertising 128-bit long multihashes - // with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to - // 256MiB when full. - EntriesCacheCapacity int - - // EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk. - // Defaults to 16384 if not specified. Note that chunks are chained together for indexing - // advertisements that include more multihashes than the configured EntriesChunkSize. - EntriesChunkSize int - - // TopicName sets the topic name on which the changes to the advertised content are announced. - // If not explicitly specified, the topic name is automatically inferred from the network name - // in following format: '/indexer/ingest/' - // Defaults to empty, which implies the topic name is inferred from network name. 
- TopicName string - - // PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine - // starts. By default, the cache is rehydrated from previously cached entries stored in - // datastore if any is present. - PurgeCacheOnStart bool -} - -type RetrievalPricing struct { - Strategy string // possible values: "default", "external" - - Default *RetrievalPricingDefault - External *RetrievalPricingExternal -} - -type RetrievalPricingExternal struct { - // Path of the external script that will be run to price a retrieval deal. - // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external". - Path string -} - -type RetrievalPricingDefault struct { - // VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal - // of a payloadCid that belongs to a verified storage deal. - // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". - // default value is true - VerifiedDealsFreeTransfer bool } type ProvingConfig struct { @@ -965,20 +817,6 @@ type Splitstore struct { } // // Full Node -type Client struct { - // The maximum number of simultaneous data transfers between the client - // and storage providers for storage deals - SimultaneousTransfersForStorage uint64 - // The maximum number of simultaneous data transfers between the client - // and storage providers for retrieval deals - SimultaneousTransfersForRetrieval uint64 - - // Require that retrievals perform no on-chain operations. Paid retrievals - // without existing payment channels with available funds will fail instead - // of automatically performing on-chain operations. - OffChainRetrieval bool -} - type Wallet struct { RemoteBackend string EnableLedger bool @@ -1109,3 +947,18 @@ type FaultReporterConfig struct { // rewards. This address should have adequate funds to cover gas fees. 
ConsensusFaultReporterAddress string } + +type CurioAlerting struct { + // PagerDutyEventURL is URL for PagerDuty.com Events API v2 URL. Events sent to this API URL are ultimately + // routed to a PagerDuty.com service and processed. + // The default is sufficient for integration with the stock commercial PagerDuty.com company's service. + PagerDutyEventURL string + + // PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service + // identifier in the integration page for the service. + PageDutyIntegrationKey string + + // MinimumWalletBalance is the minimum balance for all active wallets. If the balance is below this value, an + // alert will be triggered for the wallet + MinimumWalletBalance types.FIL +} diff --git a/node/hello/hello.go b/node/hello/hello.go index cd1645d3e1a..ef15ece16c3 100644 --- a/node/hello/hello.go +++ b/node/hello/hello.go @@ -7,6 +7,7 @@ import ( "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" inet "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" @@ -121,7 +122,10 @@ func (hs *Service) HandleStream(s inet.Stream) { hs.pmgr.AddFilecoinPeer(s.Conn().RemotePeer()) } - ts, err := hs.syncer.FetchTipSet(context.Background(), s.Conn().RemotePeer(), types.NewTipSetKey(hmsg.HeaviestTipSet...)) + // We're trying to fetch the tipset from the peer that just said hello to us. No point in + // triggering any dials. 
+ ctx := network.WithNoDial(context.Background(), "fetching filecoin hello tipset") + ts, err := hs.syncer.FetchTipSet(ctx, s.Conn().RemotePeer(), types.NewTipSetKey(hmsg.HeaviestTipSet...)) if err != nil { log.Errorf("failed to fetch tipset from peer during hello: %+v", err) return diff --git a/node/impl/client/car_helpers.go b/node/impl/client/car_helpers.go deleted file mode 100644 index c638b4bef81..00000000000 --- a/node/impl/client/car_helpers.go +++ /dev/null @@ -1,91 +0,0 @@ -package client - -import ( - "fmt" - "io" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/ipld/go-car/util" - "github.com/multiformats/go-varint" -) - -// ————————————————————————————————————————————————————————— -// -// This code is temporary, and should be deleted when -// https://github.com/ipld/go-car/issues/196 is resolved. -// -// ————————————————————————————————————————————————————————— - -func init() { - cbor.RegisterCborType(CarHeader{}) -} - -type CarHeader struct { - Roots []cid.Cid - Version uint64 -} - -func readHeader(r io.Reader) (*CarHeader, error) { - hb, err := ldRead(r, false) - if err != nil { - return nil, err - } - - var ch CarHeader - if err := cbor.DecodeInto(hb, &ch); err != nil { - return nil, fmt.Errorf("invalid header: %v", err) - } - - return &ch, nil -} - -func writeHeader(h *CarHeader, w io.Writer) error { - hb, err := cbor.DumpObject(h) - if err != nil { - return err - } - - return util.LdWrite(w, hb) -} - -func ldRead(r io.Reader, zeroLenAsEOF bool) ([]byte, error) { - l, err := varint.ReadUvarint(toByteReader(r)) - if err != nil { - // If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF. 
- if l > 0 && err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - return nil, err - } else if l == 0 && zeroLenAsEOF { - return nil, io.EOF - } - - buf := make([]byte, l) - if _, err := io.ReadFull(r, buf); err != nil { - return nil, err - } - - return buf, nil -} - -type readerPlusByte struct { - io.Reader -} - -func (rb readerPlusByte) ReadByte() (byte, error) { - return readByte(rb) -} - -func readByte(r io.Reader) (byte, error) { - var p [1]byte - _, err := io.ReadFull(r, p[:]) - return p[0], err -} - -func toByteReader(r io.Reader) io.ByteReader { - if br, ok := r.(io.ByteReader); ok { - return br - } - return &readerPlusByte{r} -} diff --git a/node/impl/client/client.go b/node/impl/client/client.go deleted file mode 100644 index c7bb252a10a..00000000000 --- a/node/impl/client/client.go +++ /dev/null @@ -1,1536 +0,0 @@ -package client - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "sort" - "strings" - "sync" - "time" - - "github.com/ipfs/boxo/blockservice" - bstore "github.com/ipfs/boxo/blockstore" - offline "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/boxo/files" - "github.com/ipfs/boxo/ipld/merkledag" - unixfile "github.com/ipfs/boxo/ipld/unixfs/file" - "github.com/ipfs/go-cid" - format "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-car" - "github.com/ipld/go-car/util" - carv2 "github.com/ipld/go-car/v2" - carv2bs "github.com/ipld/go-car/v2/blockstore" - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/datamodel" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" - textselector "github.com/ipld/go-ipld-selector-text-lite" - "github.com/libp2p/go-libp2p/core/host" - 
"github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multibase" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-commp-utils/writer" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - "github.com/filecoin-project/go-fil-markets/discovery" - rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-fil-markets/stores" - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/dline" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/unixfs" - "github.com/filecoin-project/lotus/markets/retrievaladapter" - "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/markets/utils" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/filecoin-project/lotus/node/impl/paych" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/node/repo/imports" -) - -var log = logging.Logger("client") - -var DefaultHashFunction = unixfs.DefaultHashFunction - -// 8 days ~= SealDuration + PreCommit + MaxProveCommitDuration + 8 hour buffer -const dealStartBufferHours uint64 = 8 * 24 -const DefaultDAGStoreDir = "dagstore" - -type API 
struct { - fx.In - - full.ChainAPI - full.WalletAPI - paych.PaychAPI - full.StateAPI - - SMDealClient storagemarket.StorageClient - RetDiscovery discovery.PeerResolver - Retrieval rm.RetrievalClient - Chain *store.ChainStore - - // accessors for imports and retrievals. - Imports dtypes.ClientImportMgr - StorageBlockstoreAccessor storagemarket.BlockstoreAccessor - RtvlBlockstoreAccessor rm.BlockstoreAccessor - ApiBlockstoreAccessor *retrievaladapter.APIBlockstoreAccessor - - DataTransfer dtypes.ClientDataTransfer - Host host.Host - - Repo repo.LockedRepo -} - -func calcDealExpiration(minDuration uint64, md *dline.Info, startEpoch abi.ChainEpoch) abi.ChainEpoch { - // Make sure we give some time for the miner to seal - minExp := startEpoch + abi.ChainEpoch(minDuration) - - // Align on miners ProvingPeriodBoundary - exp := minExp + md.WPoStProvingPeriod - (minExp % md.WPoStProvingPeriod) + (md.PeriodStart % md.WPoStProvingPeriod) - 1 - // Should only be possible for miners created around genesis - for exp < minExp { - exp += md.WPoStProvingPeriod - } - - return exp -} - -// importManager converts the injected type to the required type. 
-func (a *API) importManager() *imports.Manager { - return a.Imports -} - -func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { - return a.dealStarter(ctx, params, false) -} - -func (a *API) ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { - return a.dealStarter(ctx, params, true) -} - -func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isStateless bool) (*cid.Cid, error) { - if isStateless { - if params.Data.TransferType != storagemarket.TTManual { - return nil, xerrors.Errorf("invalid transfer type %s for stateless storage deal", params.Data.TransferType) - } - if !params.EpochPrice.IsZero() { - return nil, xerrors.New("stateless storage deals can only be initiated with storage price of 0") - } - } else if params.Data.TransferType == storagemarket.TTGraphsync { - bs, onDone, err := a.dealBlockstore(params.Data.Root) - if err != nil { - return nil, xerrors.Errorf("failed to find blockstore for root CID: %w", err) - } - if has, err := bs.Has(ctx, params.Data.Root); err != nil { - return nil, xerrors.Errorf("failed to query blockstore for root CID: %w", err) - } else if !has { - return nil, xerrors.Errorf("failed to find root CID in blockstore: %w", err) - } - onDone() - } - - walletKey, err := a.StateAccountKey(ctx, params.Wallet, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("failed resolving params.Wallet addr (%s): %w", params.Wallet, err) - } - - exist, err := a.WalletHas(ctx, walletKey) - if err != nil { - return nil, xerrors.Errorf("failed getting addr from wallet (%s): %w", params.Wallet, err) - } - if !exist { - return nil, xerrors.Errorf("provided address doesn't exist in wallet") - } - - mi, err := a.StateMinerInfo(ctx, params.Miner, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("failed getting peer ID: %w", err) - } - - md, err := a.StateMinerProvingDeadline(ctx, params.Miner, types.EmptyTSK) - if err != nil { 
- return nil, xerrors.Errorf("failed getting miner's deadline info: %w", err) - } - - if uint64(params.Data.PieceSize.Padded()) > uint64(mi.SectorSize) { - return nil, xerrors.New("data doesn't fit in a sector") - } - - dealStart := params.DealStartEpoch - if dealStart <= 0 { // unset, or explicitly 'epoch undefined' - ts, err := a.ChainHead(ctx) - if err != nil { - return nil, xerrors.Errorf("failed getting chain height: %w", err) - } - - blocksPerHour := 60 * 60 / build.BlockDelaySecs - dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour) // TODO: Get this from storage ask - } - - networkVersion, err := a.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("failed to get network version: %w", err) - } - - st, err := miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType, false) - if err != nil { - return nil, xerrors.Errorf("failed to get seal proof type: %w", err) - } - - // regular flow - if !isStateless { - providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs) - - result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{ - Addr: params.Wallet, - Info: &providerInfo, - Data: params.Data, - StartEpoch: dealStart, - EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), - Price: params.EpochPrice, - Collateral: params.ProviderCollateral, - Rt: st, - FastRetrieval: params.FastRetrieval, - VerifiedDeal: params.VerifiedDeal, - }) - - if err != nil { - return nil, xerrors.Errorf("failed to start deal: %w", err) - } - - return &result.ProposalCid, nil - } - - // - // stateless flow from here to the end - // - - label, err := markettypes.NewLabelFromString(params.Data.Root.Encode(multibase.MustNewEncoder('u'))) - if err != nil { - return nil, xerrors.Errorf("failed to encode label: %w", err) - } - - dealProposal := &markettypes.DealProposal{ - PieceCID: 
*params.Data.PieceCid, - PieceSize: params.Data.PieceSize.Padded(), - Client: walletKey, - Provider: params.Miner, - Label: label, - StartEpoch: dealStart, - EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), - StoragePricePerEpoch: big.Zero(), - ProviderCollateral: params.ProviderCollateral, - ClientCollateral: big.Zero(), - VerifiedDeal: params.VerifiedDeal, - } - - if dealProposal.ProviderCollateral.IsZero() { - networkCollateral, err := a.StateDealProviderCollateralBounds(ctx, params.Data.PieceSize.Padded(), params.VerifiedDeal, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("failed to determine minimum provider collateral: %w", err) - } - dealProposal.ProviderCollateral = networkCollateral.Min - } - - dealProposalSerialized, err := cborutil.Dump(dealProposal) - if err != nil { - return nil, xerrors.Errorf("failed to serialize deal proposal: %w", err) - } - - dealProposalSig, err := a.WalletSign(ctx, walletKey, dealProposalSerialized) - if err != nil { - return nil, xerrors.Errorf("failed to sign proposal : %w", err) - } - - dealProposalSigned := &markettypes.ClientDealProposal{ - Proposal: *dealProposal, - ClientSignature: *dealProposalSig, - } - dStream, err := network.NewFromLibp2pHost(a.Host, - // params duplicated from .../node/modules/client.go - // https://github.com/filecoin-project/lotus/pull/5961#discussion_r629768011 - network.RetryParameters(time.Second, 5*time.Minute, 15, 5), - ).NewDealStream(ctx, *mi.PeerId) - if err != nil { - return nil, xerrors.Errorf("opening dealstream to %s/%s failed: %w", params.Miner, *mi.PeerId, err) - } - - if err = dStream.WriteDealProposal(network.Proposal{ - FastRetrieval: true, - DealProposal: dealProposalSigned, - Piece: &storagemarket.DataRef{ - TransferType: storagemarket.TTManual, - Root: params.Data.Root, - PieceCid: params.Data.PieceCid, - PieceSize: params.Data.PieceSize, - }, - }); err != nil { - return nil, xerrors.Errorf("sending deal proposal failed: %w", err) - } - - 
resp, _, err := dStream.ReadDealResponse() - if err != nil { - return nil, xerrors.Errorf("reading proposal response failed: %w", err) - } - - dealProposalIpld, err := cborutil.AsIpld(dealProposalSigned) - if err != nil { - return nil, xerrors.Errorf("serializing proposal node failed: %w", err) - } - - if !dealProposalIpld.Cid().Equals(resp.Response.Proposal) { - return nil, xerrors.Errorf("provider returned proposal cid %s but we expected %s", resp.Response.Proposal, dealProposalIpld.Cid()) - } - - if resp.Response.State != storagemarket.StorageDealWaitingForData { - return nil, xerrors.Errorf("provider returned unexpected state %d for proposal %s, with message: %s", resp.Response.State, resp.Response.Proposal, resp.Response.Message) - } - - return &resp.Response.Proposal, nil -} - -func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { - deals, err := a.SMDealClient.ListLocalDeals(ctx) - if err != nil { - return nil, err - } - - // Get a map of transfer ID => DataTransfer - dataTransfersByID, err := a.transfersByID(ctx) - if err != nil { - return nil, err - } - - out := make([]api.DealInfo, len(deals)) - for k, v := range deals { - // Find the data transfer associated with this deal - var transferCh *api.DataTransferChannel - if v.TransferChannelID != nil { - if ch, ok := dataTransfersByID[*v.TransferChannelID]; ok { - transferCh = &ch - } - } - - out[k] = a.newDealInfoWithTransfer(transferCh, v) - } - - return out, nil -} - -func (a *API) transfersByID(ctx context.Context) (map[datatransfer.ChannelID]api.DataTransferChannel, error) { - inProgressChannels, err := a.DataTransfer.InProgressChannels(ctx) - if err != nil { - return nil, err - } - - dataTransfersByID := make(map[datatransfer.ChannelID]api.DataTransferChannel, len(inProgressChannels)) - for id, channelState := range inProgressChannels { - ch := api.NewDataTransferChannel(a.Host.ID(), channelState) - dataTransfersByID[id] = ch - } - return dataTransfersByID, nil -} - -func (a *API) 
ClientGetDealInfo(ctx context.Context, d cid.Cid) (*api.DealInfo, error) { - v, err := a.SMDealClient.GetLocalDeal(ctx, d) - if err != nil { - return nil, err - } - - di := a.newDealInfo(ctx, v) - return &di, nil -} - -func (a *API) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) { - updates := make(chan api.DealInfo) - - unsub := a.SMDealClient.SubscribeToEvents(func(_ storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - updates <- a.newDealInfo(ctx, deal) - }) - - go func() { - defer unsub() - <-ctx.Done() - }() - - return updates, nil -} - -func (a *API) newDealInfo(ctx context.Context, v storagemarket.ClientDeal) api.DealInfo { - // Find the data transfer associated with this deal - var transferCh *api.DataTransferChannel - if v.TransferChannelID != nil { - state, err := a.DataTransfer.ChannelState(ctx, *v.TransferChannelID) - - // Note: If there was an error just ignore it, as the data transfer may - // be not found if it's no longer active - if err == nil { - ch := api.NewDataTransferChannel(a.Host.ID(), state) - ch.Stages = state.Stages() - transferCh = &ch - } - } - - di := a.newDealInfoWithTransfer(transferCh, v) - di.DealStages = v.DealStages - return di -} - -func (a *API) newDealInfoWithTransfer(transferCh *api.DataTransferChannel, v storagemarket.ClientDeal) api.DealInfo { - return api.DealInfo{ - ProposalCid: v.ProposalCid, - DataRef: v.DataRef, - State: v.State, - Message: v.Message, - Provider: v.Proposal.Provider, - PieceCID: v.Proposal.PieceCID, - Size: uint64(v.Proposal.PieceSize.Unpadded()), - PricePerEpoch: v.Proposal.StoragePricePerEpoch, - Duration: uint64(v.Proposal.Duration()), - DealID: v.DealID, - CreationTime: v.CreationTime.Time(), - Verified: v.Proposal.VerifiedDeal, - TransferChannelID: v.TransferChannelID, - DataTransfer: transferCh, - } -} - -func (a *API) ClientHasLocal(_ context.Context, root cid.Cid) (bool, error) { - _, onDone, err := a.dealBlockstore(root) - if err != nil { - return false, err - 
} - onDone() - return true, nil -} - -func (a *API) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) { - peers, err := a.RetDiscovery.GetPeers(root) - if err != nil { - return nil, err - } - - out := make([]api.QueryOffer, 0, len(peers)) - for _, p := range peers { - if piece != nil && !piece.Equals(*p.PieceCID) { - continue - } - - // do not rely on local data with respect to peer id - // fetch an up-to-date miner peer id from chain - mi, err := a.StateMinerInfo(ctx, p.Address, types.EmptyTSK) - if err != nil { - return nil, err - } - pp := rm.RetrievalPeer{ - Address: p.Address, - ID: *mi.PeerId, - } - - out = append(out, a.makeRetrievalQuery(ctx, pp, root, piece, rm.QueryParams{})) - } - - return out, nil -} - -func (a *API) ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) { - mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK) - if err != nil { - return api.QueryOffer{}, err - } - rp := rm.RetrievalPeer{ - Address: miner, - ID: *mi.PeerId, - } - return a.makeRetrievalQuery(ctx, rp, root, piece, rm.QueryParams{}), nil -} - -func (a *API) makeRetrievalQuery(ctx context.Context, rp rm.RetrievalPeer, payload cid.Cid, piece *cid.Cid, qp rm.QueryParams) api.QueryOffer { - queryResponse, err := a.Retrieval.Query(ctx, rp, payload, qp) - if err != nil { - return api.QueryOffer{Err: err.Error(), Miner: rp.Address, MinerPeer: rp} - } - var errStr string - switch queryResponse.Status { - case rm.QueryResponseAvailable: - errStr = "" - case rm.QueryResponseUnavailable: - errStr = fmt.Sprintf("retrieval query offer was unavailable: %s", queryResponse.Message) - case rm.QueryResponseError: - errStr = fmt.Sprintf("retrieval query offer errored: %s", queryResponse.Message) - } - - return api.QueryOffer{ - Root: payload, - Piece: piece, - Size: queryResponse.Size, - MinPrice: queryResponse.PieceRetrievalPrice(), - UnsealPrice: queryResponse.UnsealPrice, - 
PricePerByte: queryResponse.MinPricePerByte, - PaymentInterval: queryResponse.MaxPaymentInterval, - PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease, - Miner: queryResponse.PaymentAddress, // TODO: check - MinerPeer: rp, - Err: errStr, - } -} - -func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.ImportRes, err error) { - var ( - imgr = a.importManager() - id imports.ID - root cid.Cid - carPath string - ) - - id, err = imgr.CreateImport() - if err != nil { - return nil, xerrors.Errorf("failed to create import: %w", err) - } - - if ref.IsCAR { - // user gave us a CAR file, use it as-is - // validate that it's either a carv1 or carv2, and has one root. - f, err := os.Open(ref.Path) - if err != nil { - return nil, xerrors.Errorf("failed to open CAR file: %w", err) - } - defer f.Close() //nolint:errcheck - - hd, err := car.ReadHeader(bufio.NewReader(f)) - if err != nil { - return nil, xerrors.Errorf("failed to read CAR header: %w", err) - } - if len(hd.Roots) != 1 { - return nil, xerrors.New("car file can have one and only one root") - } - if hd.Version != 1 && hd.Version != 2 { - return nil, xerrors.Errorf("car version must be 1 or 2, is %d", hd.Version) - } - - carPath = ref.Path - root = hd.Roots[0] - } else { - carPath, err = imgr.AllocateCAR(id) - if err != nil { - return nil, xerrors.Errorf("failed to create car path for import: %w", err) - } - - // remove the import if something went wrong. - defer func() { - if err != nil { - _ = os.Remove(carPath) - _ = imgr.Remove(id) - } - }() - - // perform the unixfs chunking. 
- root, err = unixfs.CreateFilestore(ctx, ref.Path, carPath) - if err != nil { - return nil, xerrors.Errorf("failed to import file using unixfs: %w", err) - } - } - - if err = imgr.AddLabel(id, imports.LSource, "import"); err != nil { - return nil, err - } - if err = imgr.AddLabel(id, imports.LFileName, ref.Path); err != nil { - return nil, err - } - if err = imgr.AddLabel(id, imports.LCARPath, carPath); err != nil { - return nil, err - } - if err = imgr.AddLabel(id, imports.LRootCid, root.String()); err != nil { - return nil, err - } - return &api.ImportRes{ - Root: root, - ImportID: id, - }, nil -} - -func (a *API) ClientRemoveImport(ctx context.Context, id imports.ID) error { - info, err := a.importManager().Info(id) - if err != nil { - return xerrors.Errorf("failed to get import metadata: %w", err) - } - - owner := info.Labels[imports.LCAROwner] - path := info.Labels[imports.LCARPath] - - // CARv2 file was not provided by the user, delete it. - if path != "" && owner == imports.CAROwnerImportMgr { - _ = os.Remove(path) - } - - return a.importManager().Remove(id) -} - -// ClientImportLocal imports a standard file into this node as a UnixFS payload, -// storing it in a CARv2 file. Note that this method is NOT integrated with the -// IPFS blockstore. That is, if client-side IPFS integration is enabled, this -// method won't import the file into that -func (a *API) ClientImportLocal(ctx context.Context, r io.Reader) (cid.Cid, error) { - file := files.NewReaderFile(r) - - // write payload to temp file - id, err := a.importManager().CreateImport() - if err != nil { - return cid.Undef, err - } - if err := a.importManager().AddLabel(id, imports.LSource, "import-local"); err != nil { - return cid.Undef, err - } - - path, err := a.importManager().AllocateCAR(id) - if err != nil { - return cid.Undef, err - } - - // writing a carv2 requires knowing the root ahead of time, which makes - // streaming cases impossible. 
- // https://github.com/ipld/go-car/issues/196 - // we work around this limitation by informing a placeholder root CID of the - // same length as our unixfs chunking strategy will generate. - // once the DAG is formed and the root is calculated, we overwrite the - // inner carv1 header with the final root. - - b, err := unixfs.CidBuilder() - if err != nil { - return cid.Undef, err - } - - // placeholder payload needs to be larger than inline CID threshold; 256 - // bytes is a safe value. - placeholderRoot, err := b.Sum(make([]byte, 256)) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to calculate placeholder root: %w", err) - } - - bs, err := carv2bs.OpenReadWrite(path, []cid.Cid{placeholderRoot}, carv2bs.UseWholeCIDs(true)) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create carv2 read/write blockstore: %w", err) - } - - root, err := unixfs.Build(ctx, file, bs, false) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to build unixfs dag: %w", err) - } - - err = bs.Finalize() - if err != nil { - return cid.Undef, xerrors.Errorf("failed to finalize carv2 read/write blockstore: %w", err) - } - - // record the root in the import manager. - if err := a.importManager().AddLabel(id, imports.LRootCid, root.String()); err != nil { - return cid.Undef, xerrors.Errorf("failed to record root CID in import manager: %w", err) - } - - // now go ahead and overwrite the root in the carv1 header. - reader, err := carv2.OpenReader(path) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create car reader: %w", err) - } - - // save the header offset. - headerOff := reader.Header.DataOffset - - // read the old header. - dr, err := reader.DataReader() - if err != nil { - return cid.Undef, fmt.Errorf("failed to get car data reader: %w", err) - } - header, err := readHeader(dr) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to read car reader: %w", err) - } - _ = reader.Close() // close the CAR reader. 
- - // write the old header into a buffer. - var oldBuf bytes.Buffer - if err = writeHeader(header, &oldBuf); err != nil { - return cid.Undef, xerrors.Errorf("failed to write header into buffer: %w", err) - } - - // replace the root. - header.Roots = []cid.Cid{root} - - // write the new header into a buffer. - var newBuf bytes.Buffer - err = writeHeader(header, &newBuf) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to write header into buffer: %w", err) - } - - // verify the length matches. - if newBuf.Len() != oldBuf.Len() { - return cid.Undef, xerrors.Errorf("failed to replace carv1 header; length mismatch (old: %d, new: %d)", oldBuf.Len(), newBuf.Len()) - } - - // open the file again, seek to the header position, and write. - f, err := os.OpenFile(path, os.O_WRONLY, 0755) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to open car: %w", err) - } - defer f.Close() //nolint:errcheck - - n, err := f.WriteAt(newBuf.Bytes(), int64(headerOff)) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to write new header to car (bytes written: %d): %w", n, err) - } - return root, nil -} - -func (a *API) ClientListImports(_ context.Context) ([]api.Import, error) { - ids, err := a.importManager().List() - if err != nil { - return nil, xerrors.Errorf("failed to fetch imports: %w", err) - } - - out := make([]api.Import, len(ids)) - for i, id := range ids { - info, err := a.importManager().Info(id) - if err != nil { - out[i] = api.Import{ - Key: id, - Err: xerrors.Errorf("getting info: %w", err).Error(), - } - continue - } - - ai := api.Import{ - Key: id, - Source: info.Labels[imports.LSource], - FilePath: info.Labels[imports.LFileName], - CARPath: info.Labels[imports.LCARPath], - } - - if info.Labels[imports.LRootCid] != "" { - c, err := cid.Parse(info.Labels[imports.LRootCid]) - if err != nil { - ai.Err = err.Error() - } else { - ai.Root = &c - } - } - - out[i] = ai - } - - return out, nil -} - -func (a *API) ClientCancelRetrievalDeal(ctx 
context.Context, dealID rm.DealID) error { - cerr := make(chan error) - go func() { - err := a.Retrieval.CancelDeal(dealID) - - select { - case cerr <- err: - case <-ctx.Done(): - } - }() - - select { - case err := <-cerr: - if err != nil { - return xerrors.Errorf("failed to cancel retrieval deal: %w", err) - } - - return nil - case <-ctx.Done(): - return xerrors.Errorf("context timeout while canceling retrieval deal: %w", ctx.Err()) - } -} - -func getDataSelector(dps *api.Selector, matchPath bool) (datamodel.Node, error) { - sel := selectorparse.CommonSelector_ExploreAllRecursively - if dps != nil { - - if strings.HasPrefix(string(*dps), "{") { - var err error - sel, err = selectorparse.ParseJSONSelector(string(*dps)) - if err != nil { - return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *dps, err) - } - } else { - ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - - selspec, err := textselector.SelectorSpecFromPath( - textselector.Expression(*dps), matchPath, - - ssb.ExploreRecursive( - selector.RecursionLimitNone(), - ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())), - ), - ) - if err != nil { - return nil, xerrors.Errorf("failed to parse text-selector '%s': %w", *dps, err) - } - - sel = selspec.Node() - log.Infof("partial retrieval of datamodel-path-selector %s/*", *dps) - } - } - - return sel, nil -} - -func (a *API) ClientRetrieve(ctx context.Context, params api.RetrievalOrder) (*api.RestrievalRes, error) { - sel, err := getDataSelector(params.DataSelector, false) - if err != nil { - return nil, err - } - - di, err := a.doRetrieval(ctx, params, sel) - if err != nil { - return nil, err - } - - return &api.RestrievalRes{ - DealID: di, - }, nil -} - -func (a *API) doRetrieval(ctx context.Context, order api.RetrievalOrder, sel datamodel.Node) (rm.DealID, error) { - if order.MinerPeer == nil || order.MinerPeer.ID == "" { - mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) - if err != nil { - 
return 0, err - } - - order.MinerPeer = &rm.RetrievalPeer{ - ID: *mi.PeerId, - Address: order.Miner, - } - } - - if order.Total.Int == nil { - return 0, xerrors.Errorf("cannot make retrieval deal for null total") - } - - if order.Size == 0 { - return 0, xerrors.Errorf("cannot make retrieval deal for zero bytes") - } - - ppb := types.BigDiv(big.Sub(order.Total, order.UnsealPrice), types.NewInt(order.Size)) - - params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice) - if err != nil { - return 0, xerrors.Errorf("Error in retrieval params: %s", err) - } - - id := a.Retrieval.NextID() - - if order.RemoteStore != nil { - if err := a.ApiBlockstoreAccessor.RegisterDealToRetrievalStore(id, *order.RemoteStore); err != nil { - return 0, xerrors.Errorf("registering api store: %w", err) - } - } - - id, err = a.Retrieval.Retrieve( - ctx, - id, - order.Root, - params, - order.Total, - *order.MinerPeer, - order.Client, - order.Miner, - ) - - if err != nil { - return 0, xerrors.Errorf("Retrieve failed: %w", err) - } - - return id, nil -} - -func (a *API) ClientRetrieveWait(ctx context.Context, deal rm.DealID) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - subscribeEvents := make(chan rm.ClientDealState, 1) - - unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { - // We'll check the deal IDs inside consumeAllEvents. 
- if state.ID != deal { - return - } - select { - case <-ctx.Done(): - case subscribeEvents <- state: - } - }) - defer unsubscribe() - - { - state, err := a.Retrieval.GetDeal(deal) - if err != nil { - return xerrors.Errorf("getting deal state: %w", err) - } - select { - case subscribeEvents <- state: - default: // already have an event queued from the subscription - } - } - - for { - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case state := <-subscribeEvents: - switch state.Status { - case rm.DealStatusCompleted: - return nil - case rm.DealStatusRejected: - return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) - case rm.DealStatusCancelled: - return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) - case - rm.DealStatusDealNotFound, - rm.DealStatusErrored: - return xerrors.Errorf("Retrieval Error: %s", state.Message) - } - } - } -} - -type ExportDest struct { - Writer io.Writer - Path string -} - -func (ed *ExportDest) doWrite(cb func(io.Writer) error) error { - if ed.Writer != nil { - return cb(ed.Writer) - } - - f, err := os.OpenFile(ed.Path, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return err - } - - if err := cb(f); err != nil { - _ = f.Close() - return err - } - - return f.Close() -} - -func (a *API) ClientExport(ctx context.Context, exportRef api.ExportRef, ref api.FileRef) error { - return a.ClientExportInto(ctx, exportRef, ref.IsCAR, ExportDest{Path: ref.Path}) -} - -func (a *API) ClientExportInto(ctx context.Context, exportRef api.ExportRef, car bool, dest ExportDest) error { - proxyBss, retrieveIntoIPFS := a.RtvlBlockstoreAccessor.(*retrievaladapter.ProxyBlockstoreAccessor) - carBss, retrieveIntoCAR := a.RtvlBlockstoreAccessor.(*retrievaladapter.CARBlockstoreAccessor) - carPath := exportRef.FromLocalCAR - - if carPath == "" { - if !retrieveIntoIPFS && !retrieveIntoCAR { - return xerrors.Errorf("unsupported retrieval blockstore accessor") - } - - if retrieveIntoCAR { - 
carPath = carBss.PathFor(exportRef.DealID) - } - } - - var retrievalBs bstore.Blockstore - if retrieveIntoIPFS { - retrievalBs = proxyBss.Blockstore - } else { - cbs, err := stores.ReadOnlyFilestore(carPath) - if err != nil { - return err - } - defer cbs.Close() //nolint:errcheck - retrievalBs = cbs - } - - dserv := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs))) - - // Are we outputting a CAR? - if car { - // not IPFS and we do full selection - just extract the CARv1 from the CARv2 we stored the retrieval in - if !retrieveIntoIPFS && len(exportRef.DAGs) == 0 && dest.Writer == nil { - return carv2.ExtractV1File(carPath, dest.Path) - } - } - - roots, err := parseDagSpec(ctx, exportRef.Root, exportRef.DAGs, dserv, car) - if err != nil { - return xerrors.Errorf("parsing dag spec: %w", err) - } - if car { - return a.outputCAR(ctx, dserv, retrievalBs, exportRef.Root, roots, dest) - } - - if len(roots) != 1 { - return xerrors.Errorf("unixfs retrieval requires one root node, got %d", len(roots)) - } - - return a.outputUnixFS(ctx, roots[0].root, dserv, dest) -} - -func (a *API) outputCAR(ctx context.Context, ds format.DAGService, bs bstore.Blockstore, root cid.Cid, dags []dagSpec, dest ExportDest) error { - // generating a CARv1 from the configured blockstore - roots := make([]cid.Cid, len(dags)) - for i, dag := range dags { - roots[i] = dag.root - } - - var lk sync.Mutex - - return dest.doWrite(func(w io.Writer) error { - - if err := car.WriteHeader(&car.CarHeader{ - Roots: roots, - Version: 1, - }, w); err != nil { - return fmt.Errorf("failed to write car header: %s", err) - } - - cs := cid.NewSet() - - for _, dagSpec := range dags { - dagSpec := dagSpec - - if err := utils.TraverseDag( - ctx, - ds, - root, - dagSpec.selector, - func(node format.Node) error { - // if we're exporting merkle proofs for this dag, export all nodes read by the traversal - if dagSpec.exportAll { - lk.Lock() - defer lk.Unlock() - if cs.Visit(node.Cid()) { - 
err := util.LdWrite(w, node.Cid().Bytes(), node.RawData()) - if err != nil { - return xerrors.Errorf("writing block data: %w", err) - } - } - } - return nil - }, - func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { - if !dagSpec.exportAll && r == traversal.VisitReason_SelectionMatch { - var c cid.Cid - if p.LastBlock.Link == nil { - c = root - } else { - cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) - if !castOK { - return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) - } - - c = cidLnk.Cid - } - - if cs.Visit(c) { - nb, err := bs.Get(ctx, c) - if err != nil { - return xerrors.Errorf("getting block data: %w", err) - } - - err = util.LdWrite(w, c.Bytes(), nb.RawData()) - if err != nil { - return xerrors.Errorf("writing block data: %w", err) - } - } - - return nil - } - return nil - }, - ); err != nil { - return xerrors.Errorf("error while traversing car dag: %w", err) - } - } - - return nil - }) -} - -func (a *API) outputUnixFS(ctx context.Context, root cid.Cid, ds format.DAGService, dest ExportDest) error { - nd, err := ds.Get(ctx, root) - if err != nil { - return xerrors.Errorf("ClientRetrieve: %w", err) - } - file, err := unixfile.NewUnixfsFile(ctx, ds, nd) - if err != nil { - return xerrors.Errorf("ClientRetrieve: %w", err) - } - - if dest.Writer == nil { - return files.WriteTo(file, dest.Path) - } - - switch f := file.(type) { - case files.File: - _, err = io.Copy(dest.Writer, f) - if err != nil { - return err - } - return nil - default: - return fmt.Errorf("file type %T is not supported", nd) - } -} - -type dagSpec struct { - root cid.Cid - selector ipld.Node - exportAll bool -} - -func parseDagSpec(ctx context.Context, root cid.Cid, dsp []api.DagSpec, ds format.DAGService, car bool) ([]dagSpec, error) { - if len(dsp) == 0 { - return []dagSpec{ - { - root: root, - selector: nil, - }, - }, nil - } - - out := make([]dagSpec, len(dsp)) - for i, spec := range dsp { - out[i].exportAll = spec.ExportMerkleProof 
- - if spec.DataSelector == nil { - return nil, xerrors.Errorf("invalid DagSpec at position %d: `DataSelector` can not be nil", i) - } - - // reify selector - var err error - out[i].selector, err = getDataSelector(spec.DataSelector, car && spec.ExportMerkleProof) - if err != nil { - return nil, err - } - - // find the pointed-at root node within the containing ds - var rsn ipld.Node - - if strings.HasPrefix(string(*spec.DataSelector), "{") { - var err error - rsn, err = selectorparse.ParseJSONSelector(string(*spec.DataSelector)) - if err != nil { - return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *spec.DataSelector, err) - } - } else { - selspec, _ := textselector.SelectorSpecFromPath(textselector.Expression(*spec.DataSelector), car && spec.ExportMerkleProof, nil) //nolint:errcheck - rsn = selspec.Node() - } - - var newRoot cid.Cid - var errHalt = errors.New("halt walk") - if err := utils.TraverseDag( - ctx, - ds, - root, - rsn, - nil, - func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { - if r == traversal.VisitReason_SelectionMatch { - if !car && p.LastBlock.Path.String() != p.Path.String() { - return xerrors.Errorf("unsupported selection path '%s' does not correspond to a block boundary (a.k.a. 
CID link)", p.Path.String()) - } - - if p.LastBlock.Link == nil { - // this is likely the root node that we've matched here - newRoot = root - return errHalt - } - - cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) - if !castOK { - return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) - } - - newRoot = cidLnk.Cid - - return errHalt - } - return nil - }, - ); err != nil && err != errHalt { - return nil, xerrors.Errorf("error while locating partial retrieval sub-root: %w", err) - } - - if newRoot == cid.Undef { - return nil, xerrors.Errorf("path selection does not match a node within %s", root) - } - - out[i].root = newRoot - } - - return out, nil -} - -func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) { - deals, err := a.Retrieval.ListDeals() - if err != nil { - return nil, err - } - dataTransfersByID, err := a.transfersByID(ctx) - if err != nil { - return nil, err - } - out := make([]api.RetrievalInfo, 0, len(deals)) - for _, v := range deals { - // Find the data transfer associated with this deal - var transferCh *api.DataTransferChannel - if v.ChannelID != nil { - if ch, ok := dataTransfersByID[*v.ChannelID]; ok { - transferCh = &ch - } - } - out = append(out, a.newRetrievalInfoWithTransfer(transferCh, v)) - } - sort.Slice(out, func(a, b int) bool { - return out[a].ID < out[b].ID - }) - return out, nil -} - -func (a *API) ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) { - updates := make(chan api.RetrievalInfo) - - unsub := a.Retrieval.SubscribeToEvents(func(evt rm.ClientEvent, deal rm.ClientDealState) { - update := a.newRetrievalInfo(ctx, deal) - update.Event = &evt - select { - case updates <- update: - case <-ctx.Done(): - } - }) - - go func() { - defer unsub() - <-ctx.Done() - }() - - return updates, nil -} - -func (a *API) newRetrievalInfoWithTransfer(ch *api.DataTransferChannel, deal rm.ClientDealState) api.RetrievalInfo { - return api.RetrievalInfo{ - 
PayloadCID: deal.PayloadCID, - ID: deal.ID, - PieceCID: deal.PieceCID, - PricePerByte: deal.PricePerByte, - UnsealPrice: deal.UnsealPrice, - Status: deal.Status, - Message: deal.Message, - Provider: deal.Sender, - BytesReceived: deal.TotalReceived, - BytesPaidFor: deal.BytesPaidFor, - TotalPaid: deal.FundsSpent, - TransferChannelID: deal.ChannelID, - DataTransfer: ch, - } -} - -func (a *API) newRetrievalInfo(ctx context.Context, v rm.ClientDealState) api.RetrievalInfo { - // Find the data transfer associated with this deal - var transferCh *api.DataTransferChannel - if v.ChannelID != nil { - state, err := a.DataTransfer.ChannelState(ctx, *v.ChannelID) - - // Note: If there was an error just ignore it, as the data transfer may - // be not found if it's no longer active - if err == nil { - ch := api.NewDataTransferChannel(a.Host.ID(), state) - ch.Stages = state.Stages() - transferCh = &ch - } - } - - return a.newRetrievalInfoWithTransfer(transferCh, v) -} - -const dealProtoPrefix = "/fil/storage/mk/" - -func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*api.StorageAsk, error) { - mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("failed getting miner info: %w", err) - } - - info := utils.NewStorageProviderInfo(miner, mi.Worker, mi.SectorSize, p, mi.Multiaddrs) - ask, err := a.SMDealClient.GetAsk(ctx, info) - if err != nil { - return nil, err - } - res := &api.StorageAsk{ - Response: ask, - } - - ps, err := a.Host.Peerstore().GetProtocols(p) - if err != nil { - return nil, err - } - for _, s := range ps { - if strings.HasPrefix(string(s), dealProtoPrefix) { - res.DealProtocols = append(res.DealProtocols, string(s)) - } - } - sort.Strings(res.DealProtocols) - - return res, nil -} - -func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) { - rdr, err := os.Open(inpath) - if err != nil { - return nil, err - } - defer rdr.Close() //nolint:errcheck - - 
// check that the data is a car file; if it's not, retrieval won't work - _, err = car.ReadHeader(bufio.NewReader(rdr)) - if err != nil { - return nil, xerrors.Errorf("not a car file: %w", err) - } - - if _, err := rdr.Seek(0, io.SeekStart); err != nil { - return nil, xerrors.Errorf("seek to start: %w", err) - } - - w := &writer.Writer{} - _, err = io.CopyBuffer(w, rdr, make([]byte, writer.CommPBuf)) - if err != nil { - return nil, xerrors.Errorf("copy into commp writer: %w", err) - } - - commp, err := w.Sum() - if err != nil { - return nil, xerrors.Errorf("computing commP failed: %w", err) - } - - return &api.CommPRet{ - Root: commp.PieceCID, - Size: commp.PieceSize.Unpadded(), - }, nil -} - -type lenWriter int64 - -func (w *lenWriter) Write(p []byte) (n int, err error) { - *w += lenWriter(len(p)) - return len(p), nil -} - -func (a *API) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) { - bs, onDone, err := a.dealBlockstore(root) - if err != nil { - return api.DataSize{}, err - } - defer onDone() - - dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - - var w lenWriter - err = car.WriteCar(ctx, dag, []cid.Cid{root}, &w) - if err != nil { - return api.DataSize{}, err - } - - up := padreader.PaddedSize(uint64(w)) - - return api.DataSize{ - PayloadSize: int64(w), - PieceSize: up.Padded(), - }, nil -} - -func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) { - bs, onDone, err := a.dealBlockstore(root) - if err != nil { - return api.DataCIDSize{}, err - } - defer onDone() - - dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - w := &writer.Writer{} - bw := bufio.NewWriterSize(w, int(writer.CommPBuf)) - - err = car.WriteCar(ctx, dag, []cid.Cid{root}, w) - if err != nil { - return api.DataCIDSize{}, err - } - - if err := bw.Flush(); err != nil { - return api.DataCIDSize{}, err - } - - dataCIDSize, err := w.Sum() - return api.DataCIDSize(dataCIDSize), err -} - 
-func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath string) error { - // create a temporary import to represent this job and obtain a staging CAR. - id, err := a.importManager().CreateImport() - if err != nil { - return xerrors.Errorf("failed to create temporary import: %w", err) - } - defer a.importManager().Remove(id) //nolint:errcheck - - tmp, err := a.importManager().AllocateCAR(id) - if err != nil { - return xerrors.Errorf("failed to allocate temporary CAR: %w", err) - } - defer os.Remove(tmp) //nolint:errcheck - - // generate and import the UnixFS DAG into a filestore (positional reference) CAR. - root, err := unixfs.CreateFilestore(ctx, ref.Path, tmp) - if err != nil { - return xerrors.Errorf("failed to import file using unixfs: %w", err) - } - - // open the positional reference CAR as a filestore. - fs, err := stores.ReadOnlyFilestore(tmp) - if err != nil { - return xerrors.Errorf("failed to open filestore from carv2 in path %s: %w", tmp, err) - } - defer fs.Close() //nolint:errcheck - - f, err := os.Create(outputPath) - if err != nil { - return err - } - - // build a dense deterministic CAR (dense = containing filled leaves) - if err := car.NewSelectiveCar( - ctx, - fs, - []car.Dag{{ - Root: root, - Selector: selectorparse.CommonSelector_ExploreAllRecursively, - }}, - car.MaxTraversalLinks(config.MaxTraversalLinks), - ).Write( - f, - ); err != nil { - return xerrors.Errorf("failed to write CAR to output file: %w", err) - } - - return f.Close() -} - -func (a *API) ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) { - inProgressChannels, err := a.DataTransfer.InProgressChannels(ctx) - if err != nil { - return nil, err - } - - apiChannels := make([]api.DataTransferChannel, 0, len(inProgressChannels)) - for _, channelState := range inProgressChannels { - apiChannels = append(apiChannels, api.NewDataTransferChannel(a.Host.ID(), channelState)) - } - - return apiChannels, nil -} - -func (a *API) 
ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { - channels := make(chan api.DataTransferChannel) - - unsub := a.DataTransfer.SubscribeToEvents(func(evt datatransfer.Event, channelState datatransfer.ChannelState) { - channel := api.NewDataTransferChannel(a.Host.ID(), channelState) - select { - case <-ctx.Done(): - case channels <- channel: - } - }) - - go func() { - defer unsub() - <-ctx.Done() - }() - - return channels, nil -} - -func (a *API) ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - selfPeer := a.Host.ID() - if isInitiator { - return a.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) - } - return a.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) -} - -func (a *API) ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - selfPeer := a.Host.ID() - if isInitiator { - return a.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) - } - return a.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) -} - -func (a *API) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error { - return a.Retrieval.TryRestartInsufficientFunds(paymentChannel) -} - -func (a *API) ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) { - ststr, ok := storagemarket.DealStates[statusCode] - if !ok { - return "", fmt.Errorf("no such deal state %d", statusCode) - } - - return ststr, nil -} - -// dealBlockstore picks the source blockstore for a storage deal; either the -// IPFS blockstore, or an import CARv2 file. 
It also returns a function that -// must be called when done. -func (a *API) dealBlockstore(root cid.Cid) (bstore.Blockstore, func(), error) { - switch acc := a.StorageBlockstoreAccessor.(type) { - case *storageadapter.ImportsBlockstoreAccessor: - bs, err := acc.Get(root) - if err != nil { - return nil, nil, xerrors.Errorf("no import found for root %s: %w", root, err) - } - - doneFn := func() { - _ = acc.Done(root) //nolint:errcheck - } - return bs, doneFn, nil - - case *storageadapter.ProxyBlockstoreAccessor: - return acc.Blockstore, func() {}, nil - - default: - return nil, nil, xerrors.Errorf("unsupported blockstore accessor type: %T", acc) - } -} diff --git a/node/impl/client/client_test.go b/node/impl/client/client_test.go deleted file mode 100644 index 67a35013166..00000000000 --- a/node/impl/client/client_test.go +++ /dev/null @@ -1,136 +0,0 @@ -// stm: #unit -package client - -import ( - "bytes" - "context" - "embed" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/ipfs/boxo/blockservice" - blockstore "github.com/ipfs/boxo/blockstore" - offline "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/boxo/files" - "github.com/ipfs/boxo/ipld/merkledag" - unixfile "github.com/ipfs/boxo/ipld/unixfs/file" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - "github.com/ipld/go-car" - carv2 "github.com/ipld/go-car/v2" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/node/repo/imports" -) - -//go:embed testdata/* -var testdata embed.FS - -func TestImportLocal(t *testing.T) { - // stm: @CLIENT_STORAGE_DEALS_IMPORT_LOCAL_001, @CLIENT_RETRIEVAL_FIND_001 - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - dir := t.TempDir() - im := imports.NewManager(ds, dir) - ctx := context.Background() - - a := &API{ - Imports: im, - StorageBlockstoreAccessor: 
storageadapter.NewImportsBlockstoreAccessor(im), - } - - b, err := testdata.ReadFile("testdata/payload.txt") - require.NoError(t, err) - - // stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 - root, err := a.ClientImportLocal(ctx, bytes.NewReader(b)) - require.NoError(t, err) - require.NotEqual(t, cid.Undef, root) - - list, err := a.ClientListImports(ctx) - require.NoError(t, err) - require.Len(t, list, 1) - - it := list[0] - require.Equal(t, root, *it.Root) - require.True(t, strings.HasPrefix(it.CARPath, dir)) - - // stm: @CLIENT_DATA_HAS_LOCAL_001 - local, err := a.ClientHasLocal(ctx, root) - require.NoError(t, err) - require.True(t, local) - - order := api.ExportRef{ - Root: root, - FromLocalCAR: it.CARPath, - } - - // retrieve as UnixFS. - out1 := filepath.Join(dir, "retrieval1.data") // as unixfs - out2 := filepath.Join(dir, "retrieval2.data") // as car - err = a.ClientExport(ctx, order, api.FileRef{ - Path: out1, - }) - require.NoError(t, err) - - outBytes, err := os.ReadFile(out1) - require.NoError(t, err) - require.Equal(t, b, outBytes) - - err = a.ClientExport(ctx, order, api.FileRef{ - Path: out2, - IsCAR: true, - }) - require.NoError(t, err) - - // open the CARv2 being custodied by the import manager - orig, err := carv2.OpenReader(it.CARPath) - require.NoError(t, err) - - // open the CARv1 we just exported - exported, err := carv2.OpenReader(out2) - require.NoError(t, err) - - require.EqualValues(t, 1, exported.Version) - require.EqualValues(t, 2, orig.Version) - - origRoots, err := orig.Roots() - require.NoError(t, err) - require.Len(t, origRoots, 1) - - exportedRoots, err := exported.Roots() - require.NoError(t, err) - require.Len(t, exportedRoots, 1) - - require.EqualValues(t, origRoots, exportedRoots) - - // recreate the unixfs dag, and see if it matches the original file byte by byte - // import the car into a memory blockstore, then export the unixfs file. 
- bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - r, err := exported.DataReader() - require.NoError(t, err) - _, err = car.LoadCar(ctx, bs, r) - require.NoError(t, err) - - dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - - nd, err := dag.Get(ctx, exportedRoots[0]) - require.NoError(t, err) - - file, err := unixfile.NewUnixfsFile(ctx, dag, nd) - require.NoError(t, err) - - exportedPath := filepath.Join(dir, "exported.data") - err = files.WriteTo(file, exportedPath) - require.NoError(t, err) - - exportedBytes, err := os.ReadFile(exportedPath) - require.NoError(t, err) - - // compare original file to recreated unixfs file. - require.Equal(t, b, exportedBytes) -} diff --git a/node/impl/client/testdata/duplicate_blocks.txt b/node/impl/client/testdata/duplicate_blocks.txt deleted file mode 100644 index 53695d7b95f..00000000000 --- a/node/impl/client/testdata/duplicate_blocks.txt +++ /dev/null @@ -1 +0,0 @@ -aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd \ No newline at end of file diff --git a/node/impl/client/testdata/payload.txt b/node/impl/client/testdata/payload.txt deleted file mode 100644 index fd4a2f3c1ff..00000000000 --- a/node/impl/client/testdata/payload.txt +++ /dev/null @@ -1,49 +0,0 @@ -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. 
Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. - -Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. - -Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. - -Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. 
Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus. - -Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. - -Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. 
Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. - -Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. - -Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. - -Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. 
Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. - -Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem. - -Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. 
Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. - -Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. - -Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. - -Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. 
Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. - -Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. - -Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. 
Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. - -Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. - -Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. - -Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. 
Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. - -Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. - -Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. - -Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. 
Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. - -Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat. - -Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. 
Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum. - -Eros in cursus turpis massa tincidunt dui ut. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed. \ No newline at end of file diff --git a/node/impl/client/testdata/payload2.txt b/node/impl/client/testdata/payload2.txt deleted file mode 100644 index 16fb150f5b2..00000000000 --- a/node/impl/client/testdata/payload2.txt +++ /dev/null @@ -1,49 +0,0 @@ -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. - -Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. 
Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. - -Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. - -Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus. - -Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. 
Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. - -Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. - -Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. 
Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. - -Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. - -Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. 
Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. - -Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem. - -Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. 
- -Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. - -Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. - -Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. 
Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. - -Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. - -Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. - -Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. 
Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. - -Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. - -Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. 
- -Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. - -Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. - -Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. 
A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. - -Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat. - -Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum. - -Eros in cursus turpis massa tincidunt dui ut. 
Aarsh shah is simply an amazing person. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed. \ No newline at end of file diff --git a/node/impl/common/common.go b/node/impl/common/common.go index eff6b58b8f1..5c4dd6a7fc5 100644 --- a/node/impl/common/common.go +++ b/node/impl/common/common.go @@ -24,6 +24,8 @@ var session = uuid.New() type CommonAPI struct { fx.In + BuildVersion build.BuildVersion + Alerting *alerting.Alerting APISecret *dtypes.APIAlg ShutdownChan dtypes.ShutdownChan @@ -63,7 +65,7 @@ func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) { } return api.APIVersion{ - Version: build.UserVersion(), + Version: string(a.BuildVersion), APIVersion: v, BlockDelay: build.BlockDelaySecs, diff --git a/node/impl/full.go b/node/impl/full.go index 527a5538436..aef7a75cb2a 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/node/impl/client" "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/market" @@ -25,7 +24,6 @@ type FullNodeAPI struct { common.CommonAPI net.NetAPI full.ChainAPI - client.API full.MpoolAPI full.GasAPI market.MarketAPI diff --git a/node/impl/full/dummy.go b/node/impl/full/dummy.go index 1c191afba09..867d5f5a47d 100644 --- a/node/impl/full/dummy.go +++ b/node/impl/full/dummy.go @@ -187,6 +187,10 @@ func (e *EthModuleDummy) EthTraceReplayBlockTransactions(ctx context.Context, bl return nil, ErrModuleDisabled } +func (e *EthModuleDummy) EthTraceTransaction(ctx 
context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) { + return nil, ErrModuleDisabled +} + var _ EthModuleAPI = &EthModuleDummy{} var _ EthEventAPI = &EthModuleDummy{} diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index e7aeafa9085..82f272c6cff 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -75,6 +75,7 @@ type EthModuleAPI interface { Web3ClientVersion(ctx context.Context) (string, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) + EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) } type EthEventAPI interface { @@ -294,14 +295,16 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * // This should be "fine" as anyone using an "Ethereum-centric" block // explorer shouldn't care about seeing pending messages from native // accounts. 
- tx, err := ethtypes.EthTxFromSignedEthMessage(p) + ethtx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(p) if err != nil { return nil, fmt.Errorf("could not convert Filecoin message into tx: %w", err) } - tx.Hash, err = tx.TxHash() + + tx, err := ethtx.ToEthTx(p) if err != nil { - return nil, fmt.Errorf("could not compute tx hash for eth txn: %w", err) + return nil, fmt.Errorf("could not convert Eth transaction to EthTx: %w", err) } + return &tx, nil } } @@ -817,12 +820,12 @@ func (a *EthModule) EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) } func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) { - txArgs, err := ethtypes.ParseEthTxArgs(rawTx) + txArgs, err := ethtypes.ParseEthTransaction(rawTx) if err != nil { return ethtypes.EmptyEthHash, err } - smsg, err := txArgs.ToSignedMessage() + smsg, err := ethtypes.ToSignedFilecoinMessage(txArgs) if err != nil { return ethtypes.EmptyEthHash, err } @@ -836,7 +839,7 @@ func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.Et } func (a *EthModule) Web3ClientVersion(ctx context.Context) (string, error) { - return build.UserVersion(), nil + return string(build.NodeUserVersion()), nil } func (a *EthModule) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) { @@ -974,31 +977,77 @@ func (a *EthModule) EthTraceReplayBlockTransactions(ctx context.Context, blkNum return allTraces, nil } -func (a *EthModule) applyMessage(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err error) { - ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) +func (a *EthModule) EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) { + + // convert from string to internal type + ethTxHash, err := ethtypes.ParseEthHash(txHash) if err != nil { - return nil, xerrors.Errorf("cannot get tipset: %w", err) + return nil, xerrors.Errorf("cannot parse 
eth hash: %w", err) } - applyTsMessages := true - if os.Getenv("LOTUS_SKIP_APPLY_TS_MESSAGE_CALL_WITH_GAS") == "1" { - applyTsMessages = false + tx, err := a.EthGetTransactionByHash(ctx, ðTxHash) + if err != nil { + return nil, xerrors.Errorf("cannot get transaction by hash: %w", err) } - // Try calling until we find a height with no migration. - for { - res, err = a.StateManager.CallWithGas(ctx, msg, []types.ChainMsg{}, ts, applyTsMessages) - if err != stmgr.ErrExpensiveFork { - break + if tx == nil { + return nil, xerrors.Errorf("transaction not found") + } + + // tx.BlockNumber is nil when the transaction is still in the mpool/pending + if tx.BlockNumber == nil { + return nil, xerrors.Errorf("no trace for pending transactions") + } + + blockTraces, err := a.EthTraceBlock(ctx, strconv.FormatUint(uint64(*tx.BlockNumber), 10)) + if err != nil { + return nil, xerrors.Errorf("cannot get trace for block: %w", err) + } + + txTraces := make([]*ethtypes.EthTraceTransaction, 0, len(blockTraces)) + for _, blockTrace := range blockTraces { + if blockTrace.TransactionHash == ethTxHash { + // Create a new EthTraceTransaction from the block trace + txTrace := ethtypes.EthTraceTransaction{ + EthTrace: blockTrace.EthTrace, + BlockHash: blockTrace.BlockHash, + BlockNumber: blockTrace.BlockNumber, + TransactionHash: blockTrace.TransactionHash, + TransactionPosition: blockTrace.TransactionPosition, + } + txTraces = append(txTraces, &txTrace) } - ts, err = a.Chain.GetTipSetFromKey(ctx, ts.Parents()) + } + + return txTraces, nil +} + +func (a *EthModule) applyMessage(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err error) { + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) + if err != nil { + return nil, xerrors.Errorf("cannot get tipset: %w", err) + } + + if ts.Height() > 0 { + pts, err := a.Chain.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { - return nil, xerrors.Errorf("getting parent tipset: %w", err) + return nil, 
xerrors.Errorf("failed to find a non-forking epoch: %w", err) + } + // Check for expensive forks from the parents to the tipset, including nil tipsets + if a.StateManager.HasExpensiveForkBetween(pts.Height(), ts.Height()+1) { + return nil, stmgr.ErrExpensiveFork } } + + st, _, err := a.StateManager.TipSetState(ctx, ts) if err != nil { - return nil, xerrors.Errorf("CallWithGas failed: %w", err) + return nil, xerrors.Errorf("cannot get tipset state: %w", err) } + res, err = a.StateManager.ApplyOnStateWithGas(ctx, st, msg, ts) + if err != nil { + return nil, xerrors.Errorf("ApplyWithGasOnState failed: %w", err) + } + if res.MsgRct.ExitCode.IsError() { reason := parseEthRevert(res.MsgRct.Return) return nil, xerrors.Errorf("message execution failed: exit %s, revert reason: %s, vm error: %s", res.MsgRct.ExitCode, reason, res.Error) diff --git a/node/impl/full/eth_events.go b/node/impl/full/eth_events.go index 7baba1e81e1..b82fe264f5b 100644 --- a/node/impl/full/eth_events.go +++ b/node/impl/full/eth_events.go @@ -266,7 +266,7 @@ func (e *ethSubscription) addFilter(ctx context.Context, f filter.Filter) { e.filters = append(e.filters, f) } -// sendOut processes the final subscription queue. It's here in case the subscriber +// startOut processes the final subscription queue. It's here in case the subscriber // is slow, and we need to buffer the messages. 
func (e *ethSubscription) startOut(ctx context.Context) { for { diff --git a/node/impl/full/eth_utils.go b/node/impl/full/eth_utils.go index a4b1c66bb84..56cc1e094e2 100644 --- a/node/impl/full/eth_utils.go +++ b/node/impl/full/eth_utils.go @@ -250,7 +250,6 @@ func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTx return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err) } - tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId) tx.BlockHash = &blkHash tx.BlockNumber = &bn tx.TransactionIndex = &ti @@ -449,7 +448,7 @@ func ethTxHashFromMessageCid(ctx context.Context, c cid.Cid, sa StateAPI) (ethty func ethTxHashFromSignedMessage(smsg *types.SignedMessage) (ethtypes.EthHash, error) { if smsg.Signature.Type == crypto.SigTypeDelegated { - tx, err := ethtypes.EthTxFromSignedEthMessage(smsg) + tx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(smsg) if err != nil { return ethtypes.EthHash{}, xerrors.Errorf("failed to convert from signed message: %w", err) } @@ -468,14 +467,13 @@ func newEthTxFromSignedMessage(smsg *types.SignedMessage, st *state.StateTree) ( // This is an eth tx if smsg.Signature.Type == crypto.SigTypeDelegated { - tx, err = ethtypes.EthTxFromSignedEthMessage(smsg) + ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(smsg) if err != nil { return ethtypes.EthTx{}, xerrors.Errorf("failed to convert from signed message: %w", err) } - - tx.Hash, err = tx.TxHash() + tx, err = ethTx.ToEthTx(smsg) if err != nil { - return ethtypes.EthTx{}, xerrors.Errorf("failed to calculate hash for ethTx: %w", err) + return ethtypes.EthTx{}, xerrors.Errorf("failed to convert from signed message: %w", err) } } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 { // Secp Filecoin Message tx, err = ethTxFromNativeMessage(smsg.VMMessage(), st) @@ -535,6 +533,9 @@ func ethTxFromNativeMessage(msg *types.Message, st *state.StateTree) (ethtypes.E codec = uint64(multicodec.Cbor) } + maxFeePerGas := 
ethtypes.EthBigInt(msg.GasFeeCap) + maxPriorityFeePerGas := ethtypes.EthBigInt(msg.GasPremium) + // We decode as a native call first. ethTx := ethtypes.EthTx{ To: &to, @@ -543,10 +544,10 @@ func ethTxFromNativeMessage(msg *types.Message, st *state.StateTree) (ethtypes.E Nonce: ethtypes.EthUint64(msg.Nonce), ChainID: ethtypes.EthUint64(build.Eip155ChainId), Value: ethtypes.EthBigInt(msg.Value), - Type: ethtypes.Eip1559TxType, + Type: ethtypes.EIP1559TxType, Gas: ethtypes.EthUint64(msg.GasLimit), - MaxFeePerGas: ethtypes.EthBigInt(msg.GasFeeCap), - MaxPriorityFeePerGas: ethtypes.EthBigInt(msg.GasPremium), + MaxFeePerGas: &maxFeePerGas, + MaxPriorityFeePerGas: &maxPriorityFeePerGas, AccessList: []ethtypes.EthHash{}, } @@ -653,6 +654,7 @@ func newEthTxFromMessageLookup(ctx context.Context, msgLookup *api.MsgLookup, tx tx.BlockHash = &blkHash tx.BlockNumber = &bn tx.TransactionIndex = &ti + return tx, nil } @@ -714,7 +716,18 @@ func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLook } baseFee := parentTs.Blocks()[0].ParentBaseFee - gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true) + + gasFeeCap, err := tx.GasFeeCap() + if err != nil { + return api.EthTxReceipt{}, xerrors.Errorf("failed to get gas fee cap: %w", err) + } + gasPremium, err := tx.GasPremium() + if err != nil { + return api.EthTxReceipt{}, xerrors.Errorf("failed to get gas premium: %w", err) + } + + gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(gasFeeCap), + big.Int(gasPremium), true) totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn) effectiveGasPrice := big.Zero() diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 6a4f6d5537e..49e97805573 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -1624,7 +1624,7 @@ func (a *StateAPI) StateMinerSectorAllocated(ctx 
context.Context, maddr address. return mas.IsAllocated(s) } -// StateVerifiedClientStatus returns the data cap for the given address. +// StateVerifierStatus returns the data cap for the given address. // Returns zero if there is no entry in the data cap table for the // address. func (a *StateAPI) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { @@ -1962,6 +1962,7 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam UpgradeWatermelonHeight: build.UpgradeWatermelonHeight, UpgradeDragonHeight: build.UpgradeDragonHeight, UpgradePhoenixHeight: build.UpgradePhoenixHeight, + UpgradeAussieHeight: build.UpgradeAussieHeight, }, }, nil } diff --git a/node/impl/full/txhashmanager.go b/node/impl/full/txhashmanager.go index 64c488d377c..ba5de0fcb40 100644 --- a/node/impl/full/txhashmanager.go +++ b/node/impl/full/txhashmanager.go @@ -85,11 +85,12 @@ func (m *EthTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types. 
return } - ethTx, err := ethtypes.EthTxFromSignedEthMessage(msg) + ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) if err != nil { log.Errorf("error converting filecoin message to eth tx: %s", err) return } + txHash, err := ethTx.TxHash() if err != nil { log.Errorf("error hashing transaction: %s", err) diff --git a/node/impl/storminer.go b/node/impl/storminer.go index bd482494017..911d772d8e5 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -3,34 +3,18 @@ package impl import ( "context" "encoding/json" - "errors" "fmt" "net/http" - "os" - "sort" "strconv" "time" "github.com/google/uuid" "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - gsimpl "github.com/ipfs/go-graphsync/impl" - "github.com/ipfs/go-graphsync/peerstate" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - gst "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - filmktsstore "github.com/filecoin-project/go-fil-markets/stores" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -46,8 +30,6 @@ import ( "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - mktsdagstore "github.com/filecoin-project/lotus/markets/dagstore" - "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/miner" 
"github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -76,18 +58,7 @@ type StorageMinerAPI struct { RemoteStore *paths.Remote // Markets - PieceStore dtypes.ProviderPieceStore `optional:"true"` - StorageProvider storagemarket.StorageProvider `optional:"true"` - RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"` - SectorAccessor retrievalmarket.SectorAccessor `optional:"true"` - DataTransfer dtypes.ProviderDataTransfer `optional:"true"` - StagingGraphsync dtypes.StagingGraphsync `optional:"true"` - Transport dtypes.ProviderTransport `optional:"true"` - DealPublisher *storageadapter.DealPublisher `optional:"true"` - SectorBlocks *sectorblocks.SectorBlocks `optional:"true"` - Host host.Host `optional:"true"` - DAGStore *dagstore.DAGStore `optional:"true"` - DAGStoreWrapper *mktsdagstore.Wrapper `optional:"true"` + SectorBlocks *sectorblocks.SectorBlocks `optional:"true"` // Miner / storage Miner *sealing.Sealing `optional:"true"` @@ -106,24 +77,10 @@ type StorageMinerAPI struct { // StorageService is populated when we're not the main storage node (e.g. 
we're a markets node) StorageService modules.MinerStorageService `optional:"true"` - ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc `optional:"true"` - SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc `optional:"true"` - ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` - SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` - StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc `optional:"true"` - SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc `optional:"true"` - ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc `optional:"true"` - SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc `optional:"true"` - ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` - SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` - ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc `optional:"true"` - SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc `optional:"true"` - ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` - SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` - SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"` - GetSealingConfigFunc dtypes.GetSealingConfigFunc `optional:"true"` - GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"` - SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"` + SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"` + GetSealingConfigFunc 
dtypes.GetSealingConfigFunc `optional:"true"` + GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"` + SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"` HarmonyDB *harmonydb.DB `optional:"true"` } @@ -533,16 +490,6 @@ func (sm *StorageMinerAPI) SealingRemoveRequest(ctx context.Context, schedId uui return sm.StorageMgr.RemoveSchedRequest(ctx, schedId) } -func (sm *StorageMinerAPI) MarketImportDealData(ctx context.Context, propCid cid.Cid, path string) error { - fi, err := os.Open(path) - if err != nil { - return xerrors.Errorf("failed to open file: %w", err) - } - defer fi.Close() //nolint:errcheck - - return sm.StorageProvider.ImportDataForDeal(ctx, propCid, fi) -} - func (sm *StorageMinerAPI) listDeals(ctx context.Context) ([]*api.MarketDeal, error) { ts, err := sm.Full.ChainHead(ctx) if err != nil { @@ -569,671 +516,6 @@ func (sm *StorageMinerAPI) MarketListDeals(ctx context.Context) ([]*api.MarketDe return sm.listDeals(ctx) } -func (sm *StorageMinerAPI) MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) { - return []struct{}{}, nil -} - -func (sm *StorageMinerAPI) MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) { - results := make(chan storagemarket.MinerDeal) - unsub := sm.StorageProvider.SubscribeToEvents(func(evt storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - select { - case results <- deal: - case <-ctx.Done(): - } - }) - go func() { - <-ctx.Done() - unsub() - close(results) - }() - return results, nil -} - -func (sm *StorageMinerAPI) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) { - return sm.StorageProvider.ListLocalDeals() -} - -func (sm *StorageMinerAPI) MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error { - options := []storagemarket.StorageAskOption{ - 
storagemarket.MinPieceSize(minPieceSize), - storagemarket.MaxPieceSize(maxPieceSize), - } - - return sm.StorageProvider.SetAsk(price, verifiedPrice, duration, options...) -} - -func (sm *StorageMinerAPI) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) { - return sm.StorageProvider.GetAsk(), nil -} - -func (sm *StorageMinerAPI) MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error { - sm.RetrievalProvider.SetAsk(rask) - return nil -} - -func (sm *StorageMinerAPI) MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) { - return sm.RetrievalProvider.GetAsk(), nil -} - -func (sm *StorageMinerAPI) MarketListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) { - inProgressChannels, err := sm.DataTransfer.InProgressChannels(ctx) - if err != nil { - return nil, err - } - - apiChannels := make([]api.DataTransferChannel, 0, len(inProgressChannels)) - for _, channelState := range inProgressChannels { - apiChannels = append(apiChannels, api.NewDataTransferChannel(sm.Host.ID(), channelState)) - } - - return apiChannels, nil -} - -func (sm *StorageMinerAPI) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - selfPeer := sm.Host.ID() - if isInitiator { - return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) - } - return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) -} - -func (sm *StorageMinerAPI) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - selfPeer := sm.Host.ID() - if isInitiator { - return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID}) - } - return 
sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) -} - -func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { - channels := make(chan api.DataTransferChannel) - - unsub := sm.DataTransfer.SubscribeToEvents(func(evt datatransfer.Event, channelState datatransfer.ChannelState) { - channel := api.NewDataTransferChannel(sm.Host.ID(), channelState) - select { - case <-ctx.Done(): - case channels <- channel: - } - }) - - go func() { - defer unsub() - <-ctx.Done() - }() - - return channels, nil -} - -func (sm *StorageMinerAPI) MarketDataTransferDiagnostics(ctx context.Context, mpid peer.ID) (*api.TransferDiagnostics, error) { - gsTransport, ok := sm.Transport.(*gst.Transport) - if !ok { - return nil, errors.New("api only works for graphsync as transport") - } - graphsyncConcrete, ok := sm.StagingGraphsync.(*gsimpl.GraphSync) - if !ok { - return nil, errors.New("api only works for non-mock graphsync implementation") - } - - inProgressChannels, err := sm.DataTransfer.InProgressChannels(ctx) - if err != nil { - return nil, err - } - - allReceivingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState) - allSendingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState) - for channelID, channel := range inProgressChannels { - if channel.OtherPeer() != mpid { - continue - } - if channel.Status() == datatransfer.Completed { - continue - } - if channel.Status() == datatransfer.Failed || channel.Status() == datatransfer.Cancelled { - continue - } - if channel.SelfPeer() == channel.Sender() { - allSendingChannels[channelID] = channel - } else { - allReceivingChannels[channelID] = channel - } - } - - // gather information about active transport channels - transportChannels := gsTransport.ChannelsForPeer(mpid) - // gather information about graphsync state for peer - gsPeerState := graphsyncConcrete.PeerState(mpid) - 
- sendingTransfers := sm.generateTransfers(ctx, transportChannels.SendingChannels, gsPeerState.IncomingState, allSendingChannels) - receivingTransfers := sm.generateTransfers(ctx, transportChannels.ReceivingChannels, gsPeerState.OutgoingState, allReceivingChannels) - - return &api.TransferDiagnostics{ - SendingTransfers: sendingTransfers, - ReceivingTransfers: receivingTransfers, - }, nil -} - -// generate transfers matches graphsync state and data transfer state for a given peer -// to produce detailed output on what's happening with a transfer -func (sm *StorageMinerAPI) generateTransfers(ctx context.Context, - transportChannels map[datatransfer.ChannelID]gst.ChannelGraphsyncRequests, - gsPeerState peerstate.PeerState, - allChannels map[datatransfer.ChannelID]datatransfer.ChannelState) []*api.GraphSyncDataTransfer { - tc := &transferConverter{ - matchedChannelIds: make(map[datatransfer.ChannelID]struct{}), - matchedRequests: make(map[graphsync.RequestID]*api.GraphSyncDataTransfer), - gsDiagnostics: gsPeerState.Diagnostics(), - requestStates: gsPeerState.RequestStates, - allChannels: allChannels, - } - - // iterate through all operating data transfer transport channels - for channelID, channelRequests := range transportChannels { - originalState, err := sm.DataTransfer.ChannelState(ctx, channelID) - var baseDiagnostics []string - var channelState *api.DataTransferChannel - if err != nil { - baseDiagnostics = append(baseDiagnostics, fmt.Sprintf("Unable to lookup channel state: %s", err)) - } else { - cs := api.NewDataTransferChannel(sm.Host.ID(), originalState) - channelState = &cs - } - // add the current request for this channel - tc.convertTransfer(channelID, true, channelState, baseDiagnostics, channelRequests.Current, true) - for _, requestID := range channelRequests.Previous { - // add any previous requests that were cancelled for a restart - tc.convertTransfer(channelID, true, channelState, baseDiagnostics, requestID, false) - } - } - - // collect any 
graphsync data for channels we don't have any data transfer data for - tc.collectRemainingTransfers() - - return tc.transfers -} - -type transferConverter struct { - matchedChannelIds map[datatransfer.ChannelID]struct{} - matchedRequests map[graphsync.RequestID]*api.GraphSyncDataTransfer - transfers []*api.GraphSyncDataTransfer - gsDiagnostics map[graphsync.RequestID][]string - requestStates graphsync.RequestStates - allChannels map[datatransfer.ChannelID]datatransfer.ChannelState -} - -// convert transfer assembles transfer and diagnostic data for a given graphsync/data-transfer request -func (tc *transferConverter) convertTransfer(channelID datatransfer.ChannelID, hasChannelID bool, channelState *api.DataTransferChannel, baseDiagnostics []string, - requestID graphsync.RequestID, isCurrentChannelRequest bool) { - diagnostics := baseDiagnostics - state, hasState := tc.requestStates[requestID] - stateString := state.String() - if !hasState { - stateString = "no graphsync state found" - } - var channelIDPtr *datatransfer.ChannelID - if !hasChannelID { - diagnostics = append(diagnostics, fmt.Sprintf("No data transfer channel id for GraphSync request ID %s", requestID)) - } else { - channelIDPtr = &channelID - if isCurrentChannelRequest && !hasState { - diagnostics = append(diagnostics, fmt.Sprintf("No current request state for data transfer channel id %s", channelID)) - } else if !isCurrentChannelRequest && hasState { - diagnostics = append(diagnostics, fmt.Sprintf("Graphsync request %s is a previous request on data transfer channel id %s that was restarted, but it is still running", requestID, channelID)) - } - } - diagnostics = append(diagnostics, tc.gsDiagnostics[requestID]...) 
- transfer := &api.GraphSyncDataTransfer{ - RequestID: &requestID, - RequestState: stateString, - IsCurrentChannelRequest: isCurrentChannelRequest, - ChannelID: channelIDPtr, - ChannelState: channelState, - Diagnostics: diagnostics, - } - tc.transfers = append(tc.transfers, transfer) - tc.matchedRequests[requestID] = transfer - if hasChannelID { - tc.matchedChannelIds[channelID] = struct{}{} - } -} - -func (tc *transferConverter) collectRemainingTransfers() { - for requestID := range tc.requestStates { - if _, ok := tc.matchedRequests[requestID]; !ok { - tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false) - } - } - for requestID := range tc.gsDiagnostics { - if _, ok := tc.matchedRequests[requestID]; !ok { - tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false) - } - } - for channelID, channelState := range tc.allChannels { - if _, ok := tc.matchedChannelIds[channelID]; !ok { - channelID := channelID - cs := api.NewDataTransferChannel(channelState.SelfPeer(), channelState) - transfer := &api.GraphSyncDataTransfer{ - RequestID: nil, - RequestState: "graphsync state unknown", - IsCurrentChannelRequest: false, - ChannelID: &channelID, - ChannelState: &cs, - Diagnostics: []string{"data transfer with no open transport channel, cannot determine linked graphsync request"}, - } - tc.transfers = append(tc.transfers, transfer) - } - } -} - -func (sm *StorageMinerAPI) MarketPendingDeals(ctx context.Context) (api.PendingDealInfo, error) { - return sm.DealPublisher.PendingDeals(), nil -} - -func (sm *StorageMinerAPI) MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error { - return sm.StorageProvider.RetryDealPublishing(propcid) -} - -func (sm *StorageMinerAPI) MarketPublishPendingDeals(ctx context.Context) error { - sm.DealPublisher.ForcePublishPendingDeals() - return nil -} - -func (sm *StorageMinerAPI) DagstoreListShards(ctx context.Context) ([]api.DagstoreShardInfo, error) { - if sm.DAGStore == nil { - return 
nil, fmt.Errorf("dagstore not available on this node") - } - - info := sm.DAGStore.AllShardsInfo() - ret := make([]api.DagstoreShardInfo, 0, len(info)) - for k, i := range info { - ret = append(ret, api.DagstoreShardInfo{ - Key: k.String(), - State: i.ShardState.String(), - Error: func() string { - if i.Error == nil { - return "" - } - return i.Error.Error() - }(), - }) - } - - // order by key. - sort.SliceStable(ret, func(i, j int) bool { - return ret[i].Key < ret[j].Key - }) - - return ret, nil -} - -func (sm *StorageMinerAPI) DagstoreRegisterShard(ctx context.Context, key string) error { - if sm.DAGStore == nil { - return fmt.Errorf("dagstore not available on this node") - } - - // First check if the shard has already been registered - k := shard.KeyFromString(key) - _, err := sm.DAGStore.GetShardInfo(k) - if err == nil { - // Shard already registered, nothing further to do - return nil - } - // If the shard is not registered we would expect ErrShardUnknown - if !errors.Is(err, dagstore.ErrShardUnknown) { - return fmt.Errorf("getting shard info from DAG store: %w", err) - } - - pieceCid, err := cid.Parse(key) - if err != nil { - return fmt.Errorf("parsing shard key as piece cid: %w", err) - } - - if err = filmktsstore.RegisterShardSync(ctx, sm.DAGStoreWrapper, pieceCid, "", true); err != nil { - return fmt.Errorf("failed to register shard: %w", err) - } - - return nil -} - -func (sm *StorageMinerAPI) DagstoreInitializeShard(ctx context.Context, key string) error { - if sm.DAGStore == nil { - return fmt.Errorf("dagstore not available on this node") - } - - k := shard.KeyFromString(key) - - info, err := sm.DAGStore.GetShardInfo(k) - if err != nil { - return fmt.Errorf("failed to get shard info: %w", err) - } - if st := info.ShardState; st != dagstore.ShardStateNew { - return fmt.Errorf("cannot initialize shard; expected state ShardStateNew, was: %s", st.String()) - } - - ch := make(chan dagstore.ShardResult, 1) - if err = sm.DAGStore.AcquireShard(ctx, k, ch, 
dagstore.AcquireOpts{}); err != nil { - return fmt.Errorf("failed to acquire shard: %w", err) - } - - var res dagstore.ShardResult - select { - case res = <-ch: - case <-ctx.Done(): - return ctx.Err() - } - - if err := res.Error; err != nil { - return fmt.Errorf("failed to acquire shard: %w", err) - } - - if res.Accessor != nil { - err = res.Accessor.Close() - if err != nil { - log.Warnw("failed to close shard accessor; continuing", "shard_key", k, "error", err) - } - } - - return nil -} - -func (sm *StorageMinerAPI) DagstoreInitializeAll(ctx context.Context, params api.DagstoreInitializeAllParams) (<-chan api.DagstoreInitializeAllEvent, error) { - if sm.DAGStore == nil { - return nil, fmt.Errorf("dagstore not available on this node") - } - - if sm.SectorAccessor == nil { - return nil, fmt.Errorf("sector accessor not available on this node") - } - - // prepare the thottler tokens. - var throttle chan struct{} - if c := params.MaxConcurrency; c > 0 { - throttle = make(chan struct{}, c) - for i := 0; i < c; i++ { - throttle <- struct{}{} - } - } - - // are we initializing only unsealed pieces? - onlyUnsealed := !params.IncludeSealed - - info := sm.DAGStore.AllShardsInfo() - var toInitialize []string - for k, i := range info { - if i.ShardState != dagstore.ShardStateNew { - continue - } - - // if we're initializing only unsealed pieces, check if there's an - // unsealed deal for this piece available. 
- if onlyUnsealed { - pieceCid, err := cid.Decode(k.String()) - if err != nil { - log.Warnw("DagstoreInitializeAll: failed to decode shard key as piece CID; skipping", "shard_key", k.String(), "error", err) - continue - } - - pi, err := sm.PieceStore.GetPieceInfo(pieceCid) - if err != nil { - log.Warnw("DagstoreInitializeAll: failed to get piece info; skipping", "piece_cid", pieceCid, "error", err) - continue - } - - var isUnsealed bool - for _, d := range pi.Deals { - isUnsealed, err = sm.SectorAccessor.IsUnsealed(ctx, d.SectorID, d.Offset.Unpadded(), d.Length.Unpadded()) - if err != nil { - log.Warnw("DagstoreInitializeAll: failed to get unsealed status; skipping deal", "deal_id", d.DealID, "error", err) - continue - } - if isUnsealed { - break - } - } - - if !isUnsealed { - log.Infow("DagstoreInitializeAll: skipping piece because it's sealed", "piece_cid", pieceCid, "error", err) - continue - } - } - - // yes, we're initializing this shard. - toInitialize = append(toInitialize, k.String()) - } - - total := len(toInitialize) - if total == 0 { - out := make(chan api.DagstoreInitializeAllEvent) - close(out) - return out, nil - } - - // response channel must be closed when we're done, or the context is cancelled. - // this buffering is necessary to prevent inflight children goroutines from - // publishing to a closed channel (res) when the context is cancelled. - out := make(chan api.DagstoreInitializeAllEvent, 32) // internal buffer. - res := make(chan api.DagstoreInitializeAllEvent, 32) // returned to caller. - - // pump events back to caller. - // two events per shard. - go func() { - defer close(res) - - for i := 0; i < total*2; i++ { - select { - case res <- <-out: - case <-ctx.Done(): - return - } - } - }() - - go func() { - for i, k := range toInitialize { - if throttle != nil { - select { - case <-throttle: - // acquired a throttle token, proceed. 
- case <-ctx.Done(): - return - } - } - - go func(k string, i int) { - r := api.DagstoreInitializeAllEvent{ - Key: k, - Event: "start", - Total: total, - Current: i + 1, // start with 1 - } - select { - case out <- r: - case <-ctx.Done(): - return - } - - err := sm.DagstoreInitializeShard(ctx, k) - - if throttle != nil { - throttle <- struct{}{} - } - - r.Event = "end" - if err == nil { - r.Success = true - } else { - r.Success = false - r.Error = err.Error() - } - - select { - case out <- r: - case <-ctx.Done(): - } - }(k, i) - } - }() - - return res, nil - -} - -func (sm *StorageMinerAPI) DagstoreRecoverShard(ctx context.Context, key string) error { - if sm.DAGStore == nil { - return fmt.Errorf("dagstore not available on this node") - } - - k := shard.KeyFromString(key) - - info, err := sm.DAGStore.GetShardInfo(k) - if err != nil { - return fmt.Errorf("failed to get shard info: %w", err) - } - if st := info.ShardState; st != dagstore.ShardStateErrored { - return fmt.Errorf("cannot recover shard; expected state ShardStateErrored, was: %s", st.String()) - } - - ch := make(chan dagstore.ShardResult, 1) - if err = sm.DAGStore.RecoverShard(ctx, k, ch, dagstore.RecoverOpts{}); err != nil { - return fmt.Errorf("failed to recover shard: %w", err) - } - - var res dagstore.ShardResult - select { - case res = <-ch: - case <-ctx.Done(): - return ctx.Err() - } - - return res.Error -} - -func (sm *StorageMinerAPI) DagstoreGC(ctx context.Context) ([]api.DagstoreShardResult, error) { - if sm.DAGStore == nil { - return nil, fmt.Errorf("dagstore not available on this node") - } - - res, err := sm.DAGStore.GC(ctx) - if err != nil { - return nil, fmt.Errorf("failed to gc: %w", err) - } - - ret := make([]api.DagstoreShardResult, 0, len(res.Shards)) - for k, err := range res.Shards { - r := api.DagstoreShardResult{Key: k.String()} - if err == nil { - r.Success = true - } else { - r.Success = false - r.Error = err.Error() - } - ret = append(ret, r) - } - - return ret, nil -} - -func 
(sm *StorageMinerAPI) IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error { - return sm.StorageProvider.AnnounceDealToIndexer(ctx, proposalCid) -} - -func (sm *StorageMinerAPI) IndexerAnnounceAllDeals(ctx context.Context) error { - return sm.StorageProvider.AnnounceAllDealsToIndexer(ctx) -} - -func (sm *StorageMinerAPI) DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]api.DagstoreShardInfo, error) { - if sm.DAGStore == nil { - return nil, fmt.Errorf("dagstore not available on this node") - } - - keys, err := sm.DAGStore.TopLevelIndex.GetShardsForMultihash(ctx, cid.Hash()) - if err != nil { - return nil, err - } - - var ret []api.DagstoreShardInfo - - for _, k := range keys { - shard, err := sm.DAGStore.GetShardInfo(k) - if err != nil { - return nil, err - } - - ret = append(ret, api.DagstoreShardInfo{ - Key: k.String(), - State: shard.ShardState.String(), - Error: func() string { - if shard.Error == nil { - return "" - } - return shard.Error.Error() - }(), - }) - } - - // order by key. 
- sort.SliceStable(ret, func(i, j int) bool { - return ret[i].Key < ret[j].Key - }) - - return ret, nil -} - -func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]*api.MarketDeal, error) { - return sm.listDeals(ctx) -} - -func (sm *StorageMinerAPI) RetrievalDealsList(ctx context.Context) (map[retrievalmarket.ProviderDealIdentifier]retrievalmarket.ProviderDealState, error) { - return sm.RetrievalProvider.ListDeals(), nil -} - -func (sm *StorageMinerAPI) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOnlineStorageDealsConfigFunc() -} - -func (sm *StorageMinerAPI) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOnlineStorageDealsConfigFunc(b) -} - -func (sm *StorageMinerAPI) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOnlineRetrievalDealsConfigFunc() -} - -func (sm *StorageMinerAPI) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOnlineRetrievalDealsConfigFunc(b) -} - -func (sm *StorageMinerAPI) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOfflineStorageDealsConfigFunc() -} - -func (sm *StorageMinerAPI) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOfflineStorageDealsConfigFunc(b) -} - -func (sm *StorageMinerAPI) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) { - return sm.ConsiderOfflineRetrievalDealsConfigFunc() -} - -func (sm *StorageMinerAPI) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error { - return sm.SetConsiderOfflineRetrievalDealsConfigFunc(b) -} - -func (sm *StorageMinerAPI) DealsConsiderVerifiedStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderVerifiedStorageDealsConfigFunc() -} - -func (sm *StorageMinerAPI) DealsSetConsiderVerifiedStorageDeals(ctx context.Context, b bool) error { - return 
sm.SetConsiderVerifiedStorageDealsConfigFunc(b) -} - -func (sm *StorageMinerAPI) DealsConsiderUnverifiedStorageDeals(ctx context.Context) (bool, error) { - return sm.ConsiderUnverifiedStorageDealsConfigFunc() -} - -func (sm *StorageMinerAPI) DealsSetConsiderUnverifiedStorageDeals(ctx context.Context, b bool) error { - return sm.SetConsiderUnverifiedStorageDealsConfigFunc(b) -} - func (sm *StorageMinerAPI) DealsGetExpectedSealDurationFunc(ctx context.Context) (time.Duration, error) { return sm.GetExpectedSealDurationFunc() } @@ -1242,24 +524,6 @@ func (sm *StorageMinerAPI) DealsSetExpectedSealDurationFunc(ctx context.Context, return sm.SetExpectedSealDurationFunc(d) } -func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fname string) error { - fi, err := os.Open(fname) - if err != nil { - return xerrors.Errorf("failed to open given file: %w", err) - } - defer fi.Close() //nolint:errcheck - - return sm.StorageProvider.ImportDataForDeal(ctx, deal, fi) -} - -func (sm *StorageMinerAPI) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) { - return sm.StorageDealPieceCidBlocklistConfigFunc() -} - -func (sm *StorageMinerAPI) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error { - return sm.SetStorageDealPieceCidBlocklistConfigFunc(cids) -} - func (sm *StorageMinerAPI) StorageAddLocal(ctx context.Context, path string) error { if sm.StorageMgr == nil { return xerrors.Errorf("no storage manager") @@ -1283,32 +547,6 @@ func (sm *StorageMinerAPI) StorageRedeclareLocal(ctx context.Context, id *storif return sm.StorageMgr.RedeclareLocalStorage(ctx, id, dropMissing) } - -func (sm *StorageMinerAPI) PiecesListPieces(ctx context.Context) ([]cid.Cid, error) { - return sm.PieceStore.ListPieceInfoKeys() -} - -func (sm *StorageMinerAPI) PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) { - return sm.PieceStore.ListCidInfoKeys() -} - -func (sm *StorageMinerAPI) PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) 
(*piecestore.PieceInfo, error) { - pi, err := sm.PieceStore.GetPieceInfo(pieceCid) - if err != nil { - return nil, err - } - return &pi, nil -} - -func (sm *StorageMinerAPI) PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) { - ci, err := sm.PieceStore.GetCIDInfo(payloadCid) - if err != nil { - return nil, err - } - - return &ci, nil -} - func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error { return backup(ctx, sm.DS, fpath) } diff --git a/node/modules/alerts.go b/node/modules/alerts.go index 9976c6d0e42..e0aa0977a85 100644 --- a/node/modules/alerts.go +++ b/node/modules/alerts.go @@ -100,16 +100,6 @@ func CheckUDPBufferSize(wanted int) func(al *alerting.Alerting) { } } -func LegacyMarketsEOL(al *alerting.Alerting) { - // Add alert if lotus-miner legacy markets subsystem is still in use - alert := al.AddAlertType("system", "EOL") - - // Alert with a message to migrate to Boost or similar markets subsystems - al.Raise(alert, map[string]string{ - "message": "The lotus-miner legacy markets subsystem is deprecated and will be removed in a future release. 
Please migrate to [Boost](https://boost.filecoin.io) or similar markets subsystems.", - }) -} - func CheckFvmConcurrency() func(al *alerting.Alerting) { return func(al *alerting.Alerting) { fvmConcurrency, ok := os.LookupEnv("LOTUS_FVM_CONCURRENCY") diff --git a/node/modules/client.go b/node/modules/client.go deleted file mode 100644 index 9d8eef4217b..00000000000 --- a/node/modules/client.go +++ /dev/null @@ -1,218 +0,0 @@ -package modules - -import ( - "bytes" - "context" - "os" - "path/filepath" - "time" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "github.com/libp2p/go-libp2p/core/host" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-data-transfer/v2/channelmonitor" - dtimpl "github.com/filecoin-project/go-data-transfer/v2/impl" - dtnet "github.com/filecoin-project/go-data-transfer/v2/network" - dtgstransport "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync" - "github.com/filecoin-project/go-fil-markets/discovery" - discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/storagemarket" - storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/markets" - marketevents "github.com/filecoin-project/lotus/markets/loggers" - "github.com/filecoin-project/lotus/markets/retrievaladapter" - "github.com/filecoin-project/lotus/markets/storageadapter" - 
"github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl/full" - payapi "github.com/filecoin-project/lotus/node/impl/paych" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/modules/helpers" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/node/repo/imports" -) - -func HandleMigrateClientFunds(lc fx.Lifecycle, mctx helpers.MetricsCtx, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - addr, err := wallet.WalletDefaultAddress(ctx) - // nothing to be done if there is no default address - if err != nil { - return nil - } - b, err := ds.Get(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client")) - if err != nil { - if xerrors.Is(err, datastore.ErrNotFound) { - return nil - } - log.Errorf("client funds migration - getting datastore value: %v", err) - return nil - } - - var value abi.TokenAmount - if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil { - log.Errorf("client funds migration - unmarshalling datastore value: %v", err) - return nil - } - _, err = fundMgr.Reserve(ctx, addr, addr, value) - if err != nil { - log.Errorf("client funds migration - reserving funds (wallet %s, addr %s, funds %d): %v", - addr, addr, value, err) - return nil - } - - return ds.Delete(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client")) - }, - }) -} - -func ClientImportMgr(ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientImportMgr, error) { - // store the imports under the repo's `imports` subdirectory. - dir := filepath.Join(r.Path(), "imports") - if err := os.MkdirAll(dir, 0755); err != nil { - return nil, xerrors.Errorf("failed to create directory %s: %w", dir, err) - } - - ns := namespace.Wrap(ds, datastore.NewKey("/client")) - return imports.NewManager(ns, dir), nil -} - -// TODO this should be removed. 
-func ClientBlockstore() dtypes.ClientBlockstore { - // in most cases this is now unused in normal operations -- however, it's important to preserve for the IPFS use case - return blockstore.WrapIDStore(blockstore.FromDatastore(datastore.NewMapDatastore())) -} - -// NewClientGraphsyncDataTransfer returns a data transfer manager that just -// uses the clients's Client DAG service for transfers -func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientDataTransfer, error) { - // go-data-transfer protocol retries: - // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour - dtRetryParams := dtnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) - net := dtnet.NewFromLibp2pHost(h, dtRetryParams) - - dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers")) - transport := dtgstransport.NewTransport(h.ID(), gs) - - // data-transfer push / pull channel restart configuration: - dtRestartConfig := dtimpl.ChannelRestartConfig(channelmonitor.Config{ - // Disable Accept and Complete timeouts until this issue is resolved: - // https://github.com/filecoin-project/lotus/issues/6343# - // Wait for the other side to respond to an Open channel message - AcceptTimeout: 0, - // Wait for the other side to send a Complete message once all - // data has been sent / received - CompleteTimeout: 0, - - // When an error occurs, wait a little while until all related errors - // have fired before sending a restart message - RestartDebounce: 10 * time.Second, - // After sending a restart, wait for at least 1 minute before sending another - RestartBackoff: time.Minute, - // After trying to restart 3 times, give up and fail the transfer - MaxConsecutiveRestarts: 3, - }) - dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, dtRestartConfig) - if err != nil { - return nil, err - } - - dt.OnReady(marketevents.ReadyLogger("client data transfer")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) 
error { - dt.SubscribeToEvents(marketevents.DataTransferLogger) - return dt.Start(ctx) - }, - OnStop: func(ctx context.Context) error { - return dt.Stop(ctx) - }, - }) - return dt, nil -} - -// NewClientDatastore creates a datastore for the client to store its deals -func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore { - return namespace.Wrap(ds, datastore.NewKey("/deals/client")) -} - -// StorageBlockstoreAccessor returns the default storage blockstore accessor -// from the import manager. -func StorageBlockstoreAccessor(importmgr dtypes.ClientImportMgr) storagemarket.BlockstoreAccessor { - return storageadapter.NewImportsBlockstoreAccessor(importmgr) -} - -// RetrievalBlockstoreAccessor returns the default retrieval blockstore accessor -// using the subdirectory `retrievals` under the repo. -func RetrievalBlockstoreAccessor(r repo.LockedRepo) (retrievalmarket.BlockstoreAccessor, error) { - dir := filepath.Join(r.Path(), "retrievals") - if err := os.MkdirAll(dir, 0755); err != nil { - return nil, xerrors.Errorf("failed to create directory %s: %w", dir, err) - } - return retrievaladapter.NewCARBlockstoreAccessor(dir), nil -} - -func StorageClient(lc fx.Lifecycle, h host.Host, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, - deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, accessor storagemarket.BlockstoreAccessor, j journal.Journal) (storagemarket.StorageClient, error) { - // go-fil-markets protocol retries: - // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour - marketsRetryParams := smnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) - net := smnet.NewFromLibp2pHost(h, marketsRetryParams) - - c, err := storageimpl.NewClient(net, dataTransfer, discovery, deals, scn, accessor, storageimpl.DealPollingInterval(time.Second), storageimpl.MaxTraversalLinks(config.MaxTraversalLinks)) - if err != nil { - return nil, err - } - c.OnReady(marketevents.ReadyLogger("storage client")) - lc.Append(fx.Hook{ - OnStart: func(ctx 
context.Context) error { - c.SubscribeToEvents(marketevents.StorageClientLogger) - - evtType := j.RegisterEventType("markets/storage/client", "state_change") - c.SubscribeToEvents(markets.StorageClientJournaler(j, evtType)) - - return c.Start(ctx) - }, - OnStop: func(context.Context) error { - return c.Stop() - }, - }) - return c, nil -} - -// RetrievalClient creates a new retrieval client attached to the client blockstore -func RetrievalClient(forceOffChain bool) func(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver, - ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, accessor *retrievaladapter.APIBlockstoreAccessor, j journal.Journal) (retrievalmarket.RetrievalClient, error) { - return func(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver, - ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, accessor *retrievaladapter.APIBlockstoreAccessor, j journal.Journal) (retrievalmarket.RetrievalClient, error) { - adapter := retrievaladapter.NewRetrievalClientNode(forceOffChain, payAPI, chainAPI, stateAPI) - network := rmnet.NewFromLibp2pHost(h) - ds = namespace.Wrap(ds, datastore.NewKey("/retrievals/client")) - client, err := retrievalimpl.NewClient(network, dt, adapter, resolver, ds, accessor) - if err != nil { - return nil, err - } - client.OnReady(marketevents.ReadyLogger("retrieval client")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - client.SubscribeToEvents(marketevents.RetrievalClientLogger) - - evtType := j.RegisterEventType("markets/retrieval/client", "state_change") - client.SubscribeToEvents(markets.RetrievalClientJournaler(j, evtType)) - - return client.Start(ctx) - }, - }) - return client, nil - } -} diff --git a/node/modules/dtypes/miner.go b/node/modules/dtypes/miner.go index 24bcc714c17..8e3a50cf14c 100644 --- 
a/node/modules/dtypes/miner.go +++ b/node/modules/dtypes/miner.go @@ -1,14 +1,11 @@ package dtypes import ( - "context" "time" "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" @@ -89,11 +86,3 @@ type SetExpectedSealDurationFunc func(time.Duration) error // GetExpectedSealDurationFunc is a function which reads from miner // too determine how long sealing is expected to take type GetExpectedSealDurationFunc func() (time.Duration, error) - -type SetMaxDealStartDelayFunc func(time.Duration) error -type GetMaxDealStartDelayFunc func() (time.Duration, error) - -type StorageDealFilter func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) -type RetrievalDealFilter func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) - -type RetrievalPricingFunc func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index 7f0466f1f3d..102f6b67c0a 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -4,16 +4,8 @@ import ( bserv "github.com/ipfs/boxo/blockservice" exchange "github.com/ipfs/boxo/exchange" "github.com/ipfs/go-datastore" - "github.com/ipfs/go-graphsync" - - datatransfer "github.com/filecoin-project/go-data-transfer/v2" - dtnet "github.com/filecoin-project/go-data-transfer/v2/network" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" - "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/node/repo/imports" ) // MetadataDS stores metadata. 
By default it's namespaced under /metadata in @@ -67,26 +59,3 @@ type ( type ChainBitswap exchange.Interface type ChainBlockService bserv.BlockService - -type ClientImportMgr *imports.Manager -type ClientBlockstore blockstore.BasicBlockstore -type ClientDealStore *statestore.StateStore -type ClientRequestValidator *requestvalidation.UnifiedRequestValidator -type ClientDatastore datastore.Batching - -type Graphsync graphsync.GraphExchange - -// ClientDataTransfer is a data transfer manager for the client -type ClientDataTransfer datatransfer.Manager - -type ProviderDealStore *statestore.StateStore -type ProviderPieceStore piecestore.PieceStore - -type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator - -// ProviderDataTransfer is a data transfer manager for the provider -type ProviderDataTransfer datatransfer.Manager -type ProviderTransferNetwork dtnet.DataTransferNetwork -type ProviderTransport datatransfer.Transport -type StagingBlockstore blockstore.BasicBlockstore -type StagingGraphsync graphsync.GraphExchange diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go deleted file mode 100644 index ca69cd2d202..00000000000 --- a/node/modules/graphsync.go +++ /dev/null @@ -1,101 +0,0 @@ -package modules - -import ( - "context" - "time" - - "github.com/ipfs/go-graphsync" - graphsyncimpl "github.com/ipfs/go-graphsync/impl" - gsnet "github.com/ipfs/go-graphsync/network" - "github.com/ipfs/go-graphsync/storeutil" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "go.opencensus.io/stats" - "go.uber.org/fx" - - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/modules/helpers" - "github.com/filecoin-project/lotus/node/repo" -) - -// Graphsync creates a graphsync instance from the given loader and storer -func Graphsync(parallelTransfersForStorage uint64, 
parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) { - return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) { - graphsyncNetwork := gsnet.NewFromLibp2pHost(h) - lsys := storeutil.LinkSystemForBlockstore(clientBs) - - gs := graphsyncimpl.New(helpers.LifecycleCtx(mctx, lc), - graphsyncNetwork, - lsys, - graphsyncimpl.RejectAllRequestsByDefault(), - graphsyncimpl.MaxInProgressIncomingRequests(parallelTransfersForStorage), - graphsyncimpl.MaxInProgressOutgoingRequests(parallelTransfersForRetrieval), - graphsyncimpl.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), - graphsyncimpl.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) - chainLinkSystem := storeutil.LinkSystemForBlockstore(chainBs) - err := gs.RegisterPersistenceOption("chainstore", chainLinkSystem) - if err != nil { - return nil, err - } - gs.RegisterIncomingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { - _, has := requestData.Extension("chainsync") - if has { - // TODO: we should confirm the selector is a reasonable one before we validate - // TODO: this code will get more complicated and should probably not live here eventually - hookActions.ValidateRequest() - hookActions.UsePersistenceOption("chainstore") - } - }) - gs.RegisterOutgoingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.OutgoingRequestHookActions) { - _, has := requestData.Extension("chainsync") - if has { - hookActions.UsePersistenceOption("chainstore") - } - }) - - graphsyncStats(mctx, lc, gs) - - return gs, nil - } -} - -func graphsyncStats(mctx helpers.MetricsCtx, lc fx.Lifecycle, gs dtypes.Graphsync) { - stopStats := make(chan struct{}) - 
lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - go func() { - t := time.NewTicker(10 * time.Second) - for { - select { - case <-t.C: - - st := gs.Stats() - stats.Record(mctx, metrics.GraphsyncReceivingPeersCount.M(int64(st.OutgoingRequests.TotalPeers))) - stats.Record(mctx, metrics.GraphsyncReceivingActiveCount.M(int64(st.OutgoingRequests.Active))) - stats.Record(mctx, metrics.GraphsyncReceivingCountCount.M(int64(st.OutgoingRequests.Pending))) - stats.Record(mctx, metrics.GraphsyncReceivingTotalMemoryAllocated.M(int64(st.IncomingResponses.TotalAllocatedAllPeers))) - stats.Record(mctx, metrics.GraphsyncReceivingTotalPendingAllocations.M(int64(st.IncomingResponses.TotalPendingAllocations))) - stats.Record(mctx, metrics.GraphsyncReceivingPeersPending.M(int64(st.IncomingResponses.NumPeersWithPendingAllocations))) - stats.Record(mctx, metrics.GraphsyncSendingPeersCount.M(int64(st.IncomingRequests.TotalPeers))) - stats.Record(mctx, metrics.GraphsyncSendingActiveCount.M(int64(st.IncomingRequests.Active))) - stats.Record(mctx, metrics.GraphsyncSendingCountCount.M(int64(st.IncomingRequests.Pending))) - stats.Record(mctx, metrics.GraphsyncSendingTotalMemoryAllocated.M(int64(st.OutgoingResponses.TotalAllocatedAllPeers))) - stats.Record(mctx, metrics.GraphsyncSendingTotalPendingAllocations.M(int64(st.OutgoingResponses.TotalPendingAllocations))) - stats.Record(mctx, metrics.GraphsyncSendingPeersPending.M(int64(st.OutgoingResponses.NumPeersWithPendingAllocations))) - - case <-stopStats: - return - } - } - }() - - return nil - }, - OnStop: func(ctx context.Context) error { - close(stopStats) - return nil - }, - }) -} diff --git a/node/modules/lp2p/host.go b/node/modules/lp2p/host.go index 9c140b41ee6..405bb869241 100644 --- a/node/modules/lp2p/host.go +++ b/node/modules/lp2p/host.go @@ -38,7 +38,7 @@ func Peerstore() (peerstore.Peerstore, error) { return pstoremem.NewPeerstore() } -func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, 
error) { +func Host(mctx helpers.MetricsCtx, buildVersion build.BuildVersion, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { pkey := params.Peerstore.PrivKey(params.ID) if pkey == nil { return nil, fmt.Errorf("missing private key for node ID: %s", params.ID) @@ -49,7 +49,7 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, libp2p.Peerstore(params.Peerstore), libp2p.NoListenAddrs, libp2p.Ping(true), - libp2p.UserAgent("lotus-" + build.UserVersion()), + libp2p.UserAgent("lotus-" + string(buildVersion)), } for _, o := range params.Opts { opts = append(opts, o...) diff --git a/node/modules/services.go b/node/modules/services.go index f3dd443d94d..9c90ba1308d 100644 --- a/node/modules/services.go +++ b/node/modules/services.go @@ -6,8 +6,6 @@ import ( "strconv" "time" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" @@ -17,9 +15,6 @@ import ( "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/go-fil-markets/discovery" - discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/beacon" @@ -34,7 +29,6 @@ import ( "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal/fsjournal" "github.com/filecoin-project/lotus/lib/peermgr" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/hello" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -224,24 +218,6 @@ func RelayIndexerMessages(lc fx.Lifecycle, ps *pubsub.PubSub, nn dtypes.NetworkN return nil } -func NewLocalDiscovery(lc fx.Lifecycle, ds dtypes.MetadataDS) (*discoveryimpl.Local, error) { - local, err := 
discoveryimpl.NewLocal(namespace.Wrap(ds, datastore.NewKey("/deals/local"))) - if err != nil { - return nil, err - } - local.OnReady(marketevents.ReadyLogger("discovery")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return local.Start(ctx) - }, - }) - return local, nil -} - -func RetrievalResolver(l *discoveryimpl.Local) discovery.PeerResolver { - return discoveryimpl.Multi(l) -} - type RandomBeaconParams struct { fx.In diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 1b9988b9563..01f293b8f4a 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -1,53 +1,28 @@ package modules import ( - "bytes" "context" "errors" - "fmt" "net/http" - "os" - "path/filepath" "strings" "time" "github.com/google/uuid" - "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" - graphsync "github.com/ipfs/go-graphsync/impl" - gsnet "github.com/ipfs/go-graphsync/network" - "github.com/ipfs/go-graphsync/storeutil" - provider "github.com/ipni/index-provider" - "github.com/libp2p/go-libp2p/core/host" "go.uber.org/fx" "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - dtimpl "github.com/filecoin-project/go-data-transfer/v2/impl" - dtnet "github.com/filecoin-project/go-data-transfer/v2/network" - dtgstransport "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync" - piecefilestore "github.com/filecoin-project/go-fil-markets/filestore" - piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - 
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/events" @@ -55,11 +30,6 @@ import ( "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/markets" - "github.com/filecoin-project/lotus/markets/dagstore" - "github.com/filecoin-project/lotus/markets/idxprov" - marketevents "github.com/filecoin-project/lotus/markets/loggers" - "github.com/filecoin-project/lotus/markets/pricing" lotusminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -74,10 +44,6 @@ import ( "github.com/filecoin-project/lotus/storage/wdpost" ) -var ( - StagingAreaDirName = "deal-staging" -) - type UuidWrapper struct { v1api.FullNode } @@ -332,163 +298,6 @@ func WindowPostScheduler(fc config.MinerFeeConfig, pc config.ProvingConfig) func } } -func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider, j journal.Journal) { - m.OnReady(marketevents.ReadyLogger("retrieval provider")) - lc.Append(fx.Hook{ - - OnStart: func(ctx context.Context) error { - m.SubscribeToEvents(marketevents.RetrievalProviderLogger) - - evtType := j.RegisterEventType("markets/retrieval/provider", "state_change") - 
m.SubscribeToEvents(markets.RetrievalProviderJournaler(j, evtType)) - - return m.Start(ctx) - }, - OnStop: func(context.Context) error { - return m.Stop() - }, - }) -} - -func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h storagemarket.StorageProvider, j journal.Journal) { - ctx := helpers.LifecycleCtx(mctx, lc) - h.OnReady(marketevents.ReadyLogger("storage provider")) - lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - h.SubscribeToEvents(marketevents.StorageProviderLogger) - - evtType := j.RegisterEventType("markets/storage/provider", "state_change") - h.SubscribeToEvents(markets.StorageProviderJournaler(j, evtType)) - - return h.Start(ctx) - }, - OnStop: func(context.Context) error { - return h.Stop() - }, - }) -} - -func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - b, err := ds.Get(ctx, datastore.NewKey("/marketfunds/provider")) - if err != nil { - if xerrors.Is(err, datastore.ErrNotFound) { - return nil - } - return err - } - - var value abi.TokenAmount - if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil { - return err - } - ts, err := node.ChainHead(ctx) - if err != nil { - log.Errorf("provider funds migration - getting chain head: %v", err) - return nil - } - - mi, err := node.StateMinerInfo(ctx, address.Address(minerAddress), ts.Key()) - if err != nil { - log.Errorf("provider funds migration - getting miner info %s: %v", minerAddress, err) - return nil - } - - _, err = node.MarketReserveFunds(ctx, mi.Worker, address.Address(minerAddress), value) - if err != nil { - log.Errorf("provider funds migration - reserving funds (wallet %s, addr %s, funds %d): %v", - mi.Worker, minerAddress, value, err) - return nil - } - - return ds.Delete(ctx, datastore.NewKey("/marketfunds/provider")) - }, - }) -} - -// NewProviderTransferNetwork sets up the libp2p2 protocol networking for 
data transfer -func NewProviderTransferNetwork(h host.Host) dtypes.ProviderTransferNetwork { - return dtnet.NewFromLibp2pHost(h) -} - -// NewProviderTransport sets up a data transfer transport over graphsync -func NewProviderTransport(h host.Host, gs dtypes.StagingGraphsync) dtypes.ProviderTransport { - return dtgstransport.NewTransport(h.ID(), gs) -} - -// NewProviderDataTransfer returns a data transfer manager -func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork, transport dtypes.ProviderTransport, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { - dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers")) - - dt, err := dtimpl.NewDataTransfer(dtDs, net, transport) - if err != nil { - return nil, err - } - - dt.OnReady(marketevents.ReadyLogger("provider data transfer")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - dt.SubscribeToEvents(marketevents.DataTransferLogger) - return dt.Start(ctx) - }, - OnStop: func(ctx context.Context) error { - return dt.Stop(ctx) - }, - }) - return dt, nil -} - -// NewProviderPieceStore creates a statestore for storing metadata about pieces -// shared by the storage and retrieval providers -func NewProviderPieceStore(lc fx.Lifecycle, ds dtypes.MetadataDS) (dtypes.ProviderPieceStore, error) { - ps, err := piecestoreimpl.NewPieceStore(namespace.Wrap(ds, datastore.NewKey("/storagemarket"))) - if err != nil { - return nil, err - } - ps.OnReady(marketevents.ReadyLogger("piecestore")) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return ps.Start(ctx) - }, - }) - return ps, nil -} - -// StagingBlockstore creates a blockstore for staging blocks for a miner -// in a storage deal, prior to sealing -func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.StagingBlockstore, error) { - ctx := helpers.LifecycleCtx(mctx, lc) - stagingds, err := r.Datastore(ctx, "/staging") - if err != nil 
{ - return nil, err - } - - return blockstore.FromDatastore(stagingds), nil -} - -// StagingGraphsync creates a graphsync instance which reads and writes blocks -// to the StagingBlockstore -func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { - return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { - graphsyncNetwork := gsnet.NewFromLibp2pHost(h) - lsys := storeutil.LinkSystemForBlockstore(ibs) - gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), - graphsyncNetwork, - lsys, - graphsync.RejectAllRequestsByDefault(), - graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval), - graphsync.MaxInProgressIncomingRequestsPerPeer(parallelTransfersForStoragePerPeer), - graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage), - graphsync.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), - graphsync.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) - - graphsyncStats(mctx, lc, gs) - - return gs - } -} - func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*lotusminer.Miner, error) { minerAddr, err := minerAddrFromDS(ds) if err != nil { @@ -512,273 +321,6 @@ func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNod return m, nil } -func NewStorageAsk(ctx helpers.MetricsCtx, fapi v1api.FullNode, ds dtypes.MetadataDS, minerAddress dtypes.MinerAddress, spn storagemarket.StorageProviderNode) (*storedask.StoredAsk, error) { - - mi, err := fapi.StateMinerInfo(ctx, address.Address(minerAddress), types.EmptyTSK) - if err != nil { - return nil, err - } - - providerDs := namespace.Wrap(ds, datastore.NewKey("/deals/provider")) - // legacy this was mistake 
where this key was place -- so we move the legacy key if need be - err = shared.MoveKey(providerDs, "/latest-ask", "/storage-ask/latest") - if err != nil { - return nil, err - } - return storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress), - storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize))) -} - -func BasicDealFilter(cfg config.DealmakingConfig, user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, - verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc, - unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc, - blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, - expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc, - startDelay dtypes.GetMaxDealStartDelayFunc, - spn storagemarket.StorageProviderNode, - r repo.LockedRepo, -) dtypes.StorageDealFilter { - return func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, - verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc, - unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc, - blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, - expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc, - startDelay dtypes.GetMaxDealStartDelayFunc, - spn storagemarket.StorageProviderNode, - r repo.LockedRepo, - ) dtypes.StorageDealFilter { - - return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { - b, err := onlineOk() - if err != nil { - return false, "miner error", err - } - - if deal.Ref != nil && deal.Ref.TransferType != storagemarket.TTManual && !b { - log.Warnf("online storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not considering online storage deals", nil - } - - b, err = offlineOk() - if err != nil { 
- return false, "miner error", err - } - - if deal.Ref != nil && deal.Ref.TransferType == storagemarket.TTManual && !b { - log.Warnf("offline storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not accepting offline storage deals", nil - } - - b, err = verifiedOk() - if err != nil { - return false, "miner error", err - } - - if deal.Proposal.VerifiedDeal && !b { - log.Warnf("verified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not accepting verified storage deals", nil - } - - b, err = unverifiedOk() - if err != nil { - return false, "miner error", err - } - - if !deal.Proposal.VerifiedDeal && !b { - log.Warnf("unverified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not accepting unverified storage deals", nil - } - - blocklist, err := blocklistFunc() - if err != nil { - return false, "miner error", err - } - - for idx := range blocklist { - if deal.Proposal.PieceCID.Equals(blocklist[idx]) { - log.Warnf("piece CID in proposal %s is blocklisted; rejecting storage deal proposal from client: %s", deal.Proposal.PieceCID, deal.Client.String()) - return false, fmt.Sprintf("miner has blocklisted piece CID %s", deal.Proposal.PieceCID), nil - } - } - - sealDuration, err := expectedSealTimeFunc() - if err != nil { - return false, "miner error", err - } - - sealEpochs := sealDuration / (time.Duration(build.BlockDelaySecs) * time.Second) - _, ht, err := spn.GetChainHead(ctx) - if err != nil { - return false, "failed to get chain head", err - } - earliest := abi.ChainEpoch(sealEpochs) + ht - if deal.Proposal.StartEpoch < earliest { - log.Warnw("proposed deal would start before sealing can be completed; rejecting storage deal proposal from client", "piece_cid", deal.Proposal.PieceCID, "client", deal.Client.String(), 
"seal_duration", sealDuration, "earliest", earliest, "curepoch", ht) - return false, fmt.Sprintf("cannot seal a sector before %s", deal.Proposal.StartEpoch), nil - } - - sd, err := startDelay() - if err != nil { - return false, "miner error", err - } - - dir := filepath.Join(r.Path(), StagingAreaDirName) - diskUsageBytes, err := r.DiskUsage(dir) - if err != nil { - return false, "miner error", err - } - - if cfg.MaxStagingDealsBytes != 0 && diskUsageBytes >= cfg.MaxStagingDealsBytes { - log.Errorw("proposed deal rejected because there are too many deals in the staging area at the moment", "MaxStagingDealsBytes", cfg.MaxStagingDealsBytes, "DiskUsageBytes", diskUsageBytes) - return false, "cannot accept deal as miner is overloaded at the moment - there are too many staging deals being processed", nil - } - - // Reject if it's more than 7 days in the future - // TODO: read from cfg - maxStartEpoch := earliest + abi.ChainEpoch(uint64(sd.Seconds())/build.BlockDelaySecs) - if deal.Proposal.StartEpoch > maxStartEpoch { - return false, fmt.Sprintf("deal start epoch is too far in the future: %s > %s", deal.Proposal.StartEpoch, maxStartEpoch), nil - } - - if user != nil { - return user(ctx, deal) - } - - return true, "", nil - } - } -} - -func StorageProvider(minerAddress dtypes.MinerAddress, - storedAsk *storedask.StoredAsk, - h host.Host, ds dtypes.MetadataDS, - r repo.LockedRepo, - pieceStore dtypes.ProviderPieceStore, - indexer provider.Interface, - dataTransfer dtypes.ProviderDataTransfer, - spn storagemarket.StorageProviderNode, - df dtypes.StorageDealFilter, - dsw *dagstore.Wrapper, - meshCreator idxprov.MeshCreator, -) (storagemarket.StorageProvider, error) { - net := smnet.NewFromLibp2pHost(h) - - dir := filepath.Join(r.Path(), StagingAreaDirName) - - // migrate temporary files that were created directly under the repo, by - // moving them to the new directory and symlinking them. 
- oldDir := r.Path() - if err := migrateDealStaging(oldDir, dir); err != nil { - return nil, xerrors.Errorf("failed to make deal staging directory %w", err) - } - - store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(dir)) - if err != nil { - return nil, err - } - - opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df)) - - return storageimpl.NewProvider( - net, - namespace.Wrap(ds, datastore.NewKey("/deals/provider")), - store, - dsw, - indexer, - pieceStore, - dataTransfer, - spn, - address.Address(minerAddress), - storedAsk, - meshCreator, - opt, - ) -} - -func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter { - return func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter { - return func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { - b, err := onlineOk() - if err != nil { - return false, "miner error", err - } - - if !b { - log.Warn("online retrieval deal consideration disabled; rejecting retrieval deal proposal from client") - return false, "miner is not accepting online retrieval deals", nil - } - - b, err = offlineOk() - if err != nil { - return false, "miner error", err - } - - if !b { - log.Info("offline retrieval has not been implemented yet") - } - - if userFilter != nil { - return userFilter(ctx, state) - } - - return true, "", nil - } - } -} - -func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork { - return rmnet.NewFromLibp2pHost(h) -} - -// RetrievalPricingFunc configures the pricing function to use for retrieval deals. 
-func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { - - return func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { - if cfg.RetrievalPricing.Strategy == config.RetrievalPricingExternalMode { - return pricing.ExternalRetrievalPricingFunc(cfg.RetrievalPricing.External.Path) - } - - return retrievalimpl.DefaultPricingFunc(cfg.RetrievalPricing.Default.VerifiedDealsFreeTransfer) - } -} - -// RetrievalProvider creates a new retrieval provider attached to the provider blockstore -func RetrievalProvider( - maddr dtypes.MinerAddress, - adapter retrievalmarket.RetrievalProviderNode, - sa retrievalmarket.SectorAccessor, - netwk rmnet.RetrievalMarketNetwork, - ds dtypes.MetadataDS, - pieceStore dtypes.ProviderPieceStore, - dt dtypes.ProviderDataTransfer, - pricingFnc dtypes.RetrievalPricingFunc, - userFilter dtypes.RetrievalDealFilter, - dagStore *dagstore.Wrapper, -) (retrievalmarket.RetrievalProvider, error) { - opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) - - retrievalmarket.DefaultPricePerByte = big.Zero() // todo: for whatever reason this is a global var in markets - - return retrievalimpl.NewProvider( - address.Address(maddr), - adapter, - sa, - netwk, - pieceStore, - dagStore, - dt, - namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), - retrievalimpl.RetrievalPricingFunc(pricingFnc), - opt, - ) -} - var WorkerCallsPrefix = datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") @@ -838,153 +380,6 @@ func StorageAuthWithURL(apiInfo string) interface{} { } } -func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) { - return func() (out bool, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) 
{ - cfg := c.GetDealmakingConfig() - out = cfg.ConsiderOnlineStorageDeals - }) - return - }, nil -} - -func NewSetConsideringOnlineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineStorageDealsConfigFunc, error) { - return func(b bool) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.ConsiderOnlineStorageDeals = b - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func NewConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineRetrievalDealsConfigFunc, error) { - return func() (out bool, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = cfg.ConsiderOnlineRetrievalDeals - }) - return - }, nil -} - -func NewSetConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineRetrievalDealsConfigFunc, error) { - return func(b bool) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.ConsiderOnlineRetrievalDeals = b - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func NewStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.StorageDealPieceCidBlocklistConfigFunc, error) { - return func() (out []cid.Cid, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = cfg.PieceCidBlocklist - }) - return - }, nil -} - -func NewSetStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.SetStorageDealPieceCidBlocklistConfigFunc, error) { - return func(blocklist []cid.Cid) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.PieceCidBlocklist = blocklist - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func NewConsiderOfflineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineStorageDealsConfigFunc, error) { - return func() (out bool, err 
error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = cfg.ConsiderOfflineStorageDeals - }) - return - }, nil -} - -func NewSetConsideringOfflineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineStorageDealsConfigFunc, error) { - return func(b bool) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.ConsiderOfflineStorageDeals = b - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func NewConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineRetrievalDealsConfigFunc, error) { - return func() (out bool, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = cfg.ConsiderOfflineRetrievalDeals - }) - return - }, nil -} - -func NewSetConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineRetrievalDealsConfigFunc, error) { - return func(b bool) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.ConsiderOfflineRetrievalDeals = b - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func NewConsiderVerifiedStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderVerifiedStorageDealsConfigFunc, error) { - return func() (out bool, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = cfg.ConsiderVerifiedStorageDeals - }) - return - }, nil -} - -func NewSetConsideringVerifiedStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderVerifiedStorageDealsConfigFunc, error) { - return func(b bool) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.ConsiderVerifiedStorageDeals = b - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func NewConsiderUnverifiedStorageDealsConfigFunc(r repo.LockedRepo) 
(dtypes.ConsiderUnverifiedStorageDealsConfigFunc, error) { - return func() (out bool, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = cfg.ConsiderUnverifiedStorageDeals - }) - return - }, nil -} - -func NewSetConsideringUnverifiedStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderUnverifiedStorageDealsConfigFunc, error) { - return func(b bool) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.ConsiderUnverifiedStorageDeals = b - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error) { return func(cfg sealiface.Config) (err error) { err = mutateSealingCfg(r, func(c config.SealingConfiger) { @@ -1092,48 +487,6 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error }, nil } -func NewSetExpectedSealDurationFunc(r repo.LockedRepo) (dtypes.SetExpectedSealDurationFunc, error) { - return func(delay time.Duration) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.ExpectedSealDuration = config.Duration(delay) - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func NewGetExpectedSealDurationFunc(r repo.LockedRepo) (dtypes.GetExpectedSealDurationFunc, error) { - return func() (out time.Duration, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = time.Duration(cfg.ExpectedSealDuration) - }) - return - }, nil -} - -func NewSetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.SetMaxDealStartDelayFunc, error) { - return func(delay time.Duration) (err error) { - err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - cfg.MaxDealStartDelay = config.Duration(delay) - c.SetDealmakingConfig(cfg) - }) - return - }, nil -} - -func 
NewGetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.GetMaxDealStartDelayFunc, error) { - return func() (out time.Duration, err error) { - err = readDealmakingCfg(r, func(c config.DealmakingConfiger) { - cfg := c.GetDealmakingConfig() - out = time.Duration(cfg.MaxDealStartDelay) - }) - return - }, nil -} - func readSealingCfg(r repo.LockedRepo, accessor func(config.DealmakingConfiger, config.SealingConfiger)) error { raw, err := r.Config() if err != nil { @@ -1171,91 +524,6 @@ func mutateSealingCfg(r repo.LockedRepo, mutator func(config.SealingConfiger)) e return multierr.Combine(typeErr, setConfigErr) } -func readDealmakingCfg(r repo.LockedRepo, accessor func(config.DealmakingConfiger)) error { - raw, err := r.Config() - if err != nil { - return err - } - - cfg, ok := raw.(config.DealmakingConfiger) - if !ok { - return xerrors.New("expected config with dealmaking config trait") - } - - accessor(cfg) - - return nil -} - -func mutateDealmakingCfg(r repo.LockedRepo, mutator func(config.DealmakingConfiger)) error { - var typeErr error - - setConfigErr := r.SetConfig(func(raw interface{}) { - cfg, ok := raw.(config.DealmakingConfiger) - if !ok { - typeErr = errors.New("expected config with dealmaking config trait") - return - } - - mutator(cfg) - }) - - return multierr.Combine(typeErr, setConfigErr) -} - -func migrateDealStaging(oldPath, newPath string) error { - dirInfo, err := os.Stat(newPath) - if err == nil { - if !dirInfo.IsDir() { - return xerrors.Errorf("%s is not a directory", newPath) - } - // The newPath exists already, below migration has already occurred. - return nil - } - - // if the directory doesn't exist, create it - if os.IsNotExist(err) { - if err := os.MkdirAll(newPath, 0755); err != nil { - return xerrors.Errorf("failed to mk directory %s for deal staging: %w", newPath, err) - } - } else { // if we failed for other reasons, abort. - return err - } - - // if this is the first time we created the directory, symlink all staged deals into it. 
"Migration" - // get a list of files in the miner repo - dirEntries, err := os.ReadDir(oldPath) - if err != nil { - return xerrors.Errorf("failed to list directory %s for deal staging: %w", oldPath, err) - } - - for _, entry := range dirEntries { - // ignore directories, they are not the deals. - if entry.IsDir() { - continue - } - // the FileStore from fil-storage-market creates temporary staged deal files with the pattern "fstmp" - // https://github.com/filecoin-project/go-fil-markets/blob/00ff81e477d846ac0cb58a0c7d1c2e9afb5ee1db/filestore/filestore.go#L69 - name := entry.Name() - if strings.Contains(name, "fstmp") { - // from the miner repo - oldPath := filepath.Join(oldPath, name) - // to its subdir "deal-staging" - newPath := filepath.Join(newPath, name) - // create a symbolic link in the new deal staging directory to preserve existing staged deals. - // all future staged deals will be created here. - if err := os.Rename(oldPath, newPath); err != nil { - return xerrors.Errorf("failed to move %s to %s: %w", oldPath, newPath, err) - } - if err := os.Symlink(newPath, oldPath); err != nil { - return xerrors.Errorf("failed to symlink %s to %s: %w", oldPath, newPath, err) - } - log.Infow("symlinked staged deal", "from", oldPath, "to", newPath) - } - } - return nil -} - func ExtractEnabledMinerSubsystems(cfg config.MinerSubsystemConfig) (res api.MinerSubsystems) { if cfg.EnableMining { res = append(res, api.SubsystemMining) @@ -1266,8 +534,6 @@ func ExtractEnabledMinerSubsystems(cfg config.MinerSubsystemConfig) (res api.Min if cfg.EnableSectorStorage { res = append(res, api.SubsystemSectorStorage) } - if cfg.EnableMarkets { - res = append(res, api.SubsystemMarkets) - } + return res } diff --git a/node/modules/storageminer_dagstore.go b/node/modules/storageminer_dagstore.go deleted file mode 100644 index 620e690901c..00000000000 --- a/node/modules/storageminer_dagstore.go +++ /dev/null @@ -1,94 +0,0 @@ -package modules - -import ( - "context" - "fmt" - "os" - 
"path/filepath" - "strconv" - - "github.com/libp2p/go-libp2p/core/host" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/dagstore" - - mdagstore "github.com/filecoin-project/lotus/markets/dagstore" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" -) - -const ( - EnvDAGStoreCopyConcurrency = "LOTUS_DAGSTORE_COPY_CONCURRENCY" - DefaultDAGStoreDir = "dagstore" -) - -// NewMinerAPI creates a new MinerAPI adaptor for the dagstore mounts. -func NewMinerAPI(cfg config.DAGStoreConfig) func(fx.Lifecycle, repo.LockedRepo, dtypes.ProviderPieceStore, mdagstore.SectorAccessor) (mdagstore.MinerAPI, error) { - return func(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa mdagstore.SectorAccessor) (mdagstore.MinerAPI, error) { - // caps the amount of concurrent calls to the storage, so that we don't - // spam it during heavy processes like bulk migration. - if v, ok := os.LookupEnv("LOTUS_DAGSTORE_MOUNT_CONCURRENCY"); ok { - concurrency, err := strconv.Atoi(v) - if err == nil { - cfg.MaxConcurrencyStorageCalls = concurrency - } - } - - mountApi := mdagstore.NewMinerAPI(pieceStore, sa, cfg.MaxConcurrencyStorageCalls, cfg.MaxConcurrentUnseals) - ready := make(chan error, 1) - pieceStore.OnReady(func(err error) { - ready <- err - }) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - if err := <-ready; err != nil { - return fmt.Errorf("aborting dagstore start; piecestore failed to start: %s", err) - } - return mountApi.Start(ctx) - }, - OnStop: func(context.Context) error { - return nil - }, - }) - - return mountApi, nil - } -} - -// DAGStore constructs a DAG store using the supplied minerAPI, and the -// user configuration. It returns both the DAGStore and the Wrapper suitable for -// passing to markets. 
-func DAGStore(cfg config.DAGStoreConfig) func(lc fx.Lifecycle, r repo.LockedRepo, minerAPI mdagstore.MinerAPI, h host.Host) (*dagstore.DAGStore, *mdagstore.Wrapper, error) { - return func(lc fx.Lifecycle, r repo.LockedRepo, minerAPI mdagstore.MinerAPI, h host.Host) (*dagstore.DAGStore, *mdagstore.Wrapper, error) { - // fall back to default root directory if not explicitly set in the config. - if cfg.RootDir == "" { - cfg.RootDir = filepath.Join(r.Path(), DefaultDAGStoreDir) - } - - v, ok := os.LookupEnv(EnvDAGStoreCopyConcurrency) - if ok { - concurrency, err := strconv.Atoi(v) - if err == nil { - cfg.MaxConcurrentReadyFetches = concurrency - } - } - - dagst, w, err := mdagstore.NewDAGStore(cfg, minerAPI, h) - if err != nil { - return nil, nil, xerrors.Errorf("failed to create DAG store: %w", err) - } - - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return w.Start(ctx) - }, - OnStop: func(context.Context) error { - return w.Close() - }, - }) - - return dagst, w, nil - } -} diff --git a/node/modules/storageminer_idxprov.go b/node/modules/storageminer_idxprov.go deleted file mode 100644 index 777c59386b5..00000000000 --- a/node/modules/storageminer_idxprov.go +++ /dev/null @@ -1,117 +0,0 @@ -package modules - -import ( - "context" - - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - provider "github.com/ipni/index-provider" - "github.com/ipni/index-provider/engine" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/host" - "go.uber.org/fx" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -type IdxProv struct { - fx.In - - fx.Lifecycle - Datastore dtypes.MetadataDS -} - -func IndexProvider(cfg config.IndexProviderConfig) func(params IdxProv, marketHost host.Host, dt dtypes.ProviderDataTransfer, maddr 
dtypes.MinerAddress, ps *pubsub.PubSub, nn dtypes.NetworkName) (provider.Interface, error) { - return func(args IdxProv, marketHost host.Host, dt dtypes.ProviderDataTransfer, maddr dtypes.MinerAddress, ps *pubsub.PubSub, nn dtypes.NetworkName) (provider.Interface, error) { - topicName := cfg.TopicName - // If indexer topic name is left empty, infer it from the network name. - if topicName == "" { - // Use the same mechanism as the Dependency Injection (DI) to construct the topic name, - // so that we are certain it is consistent with the name allowed by the subscription - // filter. - // - // See: lp2p.GossipSub. - topicName = build.IndexerIngestTopic(nn) - log.Debugw("Inferred indexer topic from network name", "topic", topicName) - } - - ipds := namespace.Wrap(args.Datastore, datastore.NewKey("/index-provider")) - addrs := marketHost.Addrs() - addrsString := make([]string, 0, len(addrs)) - for _, addr := range addrs { - addrsString = append(addrsString, addr.String()) - } - var opts = []engine.Option{ - engine.WithDatastore(ipds), - engine.WithHost(marketHost), - engine.WithRetrievalAddrs(addrsString...), - engine.WithEntriesCacheCapacity(cfg.EntriesCacheCapacity), - engine.WithChainedEntries(cfg.EntriesChunkSize), - engine.WithTopicName(topicName), - engine.WithPurgeCacheOnStart(cfg.PurgeCacheOnStart), - } - - llog := log.With( - "idxProvEnabled", cfg.Enable, - "pid", marketHost.ID(), - "topic", topicName, - "retAddrs", marketHost.Addrs()) - // If announcements to the network are enabled, then set options for datatransfer publisher. - if cfg.Enable { - // Join the indexer topic using the market's pubsub instance. Otherwise, the provider - // engine would create its own instance of pubsub down the line in dagsync, which has - // no validators by default. 
- t, err := ps.Join(topicName) - if err != nil { - llog.Errorw("Failed to join indexer topic", "err", err) - return nil, xerrors.Errorf("joining indexer topic %s: %w", topicName, err) - } - - // Get the miner ID and set as extra gossip data. - // The extra data is required by the lotus-specific index-provider gossip message validators. - ma := address.Address(maddr) - opts = append(opts, - engine.WithPublisherKind(engine.DataTransferPublisher), - engine.WithDataTransfer(dt), - engine.WithExtraGossipData(ma.Bytes()), - engine.WithTopic(t), - ) - llog = llog.With("extraGossipData", ma, "publisher", "data-transfer") - } else { - opts = append(opts, engine.WithPublisherKind(engine.NoPublisher)) - llog = llog.With("publisher", "none") - } - - // Instantiate the index provider engine. - e, err := engine.New(opts...) - if err != nil { - return nil, xerrors.Errorf("creating indexer provider engine: %w", err) - } - llog.Info("Instantiated index provider engine") - - args.Lifecycle.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - // Note that the OnStart context is cancelled after startup. Its use in e.Start is - // to start up gossipsub publishers and restore cache, all of which are completed - // before e.Start returns. Therefore, it is fine to reuse the give context. 
- if err := e.Start(ctx); err != nil { - return xerrors.Errorf("starting indexer provider engine: %w", err) - } - log.Infof("Started index provider engine") - return nil - }, - OnStop: func(_ context.Context) error { - if err := e.Shutdown(); err != nil { - return xerrors.Errorf("shutting down indexer provider engine: %w", err) - } - return nil - }, - }) - return e, nil - } -} diff --git a/node/modules/storageminer_idxprov_test.go b/node/modules/storageminer_idxprov_test.go deleted file mode 100644 index 434577bab64..00000000000 --- a/node/modules/storageminer_idxprov_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package modules_test - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/ipfs/go-datastore" - provider "github.com/ipni/index-provider" - "github.com/libp2p/go-libp2p" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/host" - "github.com/stretchr/testify/require" - "go.uber.org/fx" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -func Test_IndexProviderTopic(t *testing.T) { - tests := []struct { - name string - givenAllowedTopics []string - givenConfiguredTopic string - givenNetworkName dtypes.NetworkName - wantErr string - }{ - { - name: "Joins configured topic when allowed", - givenAllowedTopics: []string{"fish"}, - givenConfiguredTopic: "fish", - }, - { - name: "Joins topic inferred from network name when allowed", - givenAllowedTopics: []string{"/indexer/ingest/fish"}, - givenNetworkName: "fish", - }, - { - name: "Fails to join configured topic when disallowed", - givenAllowedTopics: []string{"/indexer/ingest/fish"}, - givenConfiguredTopic: "lobster", - wantErr: "joining indexer topic lobster: topic is not allowed by the subscription filter", - }, - { - name: "Fails to join topic inferred from network name when disallowed", - givenAllowedTopics: 
[]string{"/indexer/ingest/fish"}, - givenNetworkName: "lobster", - wantErr: "joining indexer topic /indexer/ingest/lobster: topic is not allowed by the subscription filter", - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - h, err := libp2p.New() - require.NoError(t, err) - defer func() { - require.NoError(t, h.Close()) - }() - - filter := pubsub.WithSubscriptionFilter(pubsub.NewAllowlistSubscriptionFilter(test.givenAllowedTopics...)) - ps, err := pubsub.NewGossipSub(ctx, h, filter) - require.NoError(t, err) - - app := fx.New( - fx.Provide( - func() host.Host { return h }, - func() dtypes.NetworkName { return test.givenNetworkName }, - func() dtypes.MinerAddress { return dtypes.MinerAddress(address.TestAddress) }, - func() dtypes.ProviderDataTransfer { return nil }, - func() *pubsub.PubSub { return ps }, - func() dtypes.MetadataDS { return datastore.NewMapDatastore() }, - modules.IndexProvider(config.IndexProviderConfig{ - Enable: true, - TopicName: test.givenConfiguredTopic, - EntriesChunkSize: 16384, - }), - ), - fx.Invoke(func(p provider.Interface) {}), - ) - err = app.Start(ctx) - - if test.wantErr == "" { - require.NoError(t, err) - err = app.Stop(ctx) - require.NoError(t, err) - } else { - require.True(t, strings.HasSuffix(err.Error(), test.wantErr)) - } - }) - } -} diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index ec35f8f3078..98d0bd01b47 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -74,11 +74,6 @@ type RepoType interface { APIInfoEnvVars() (string, []string, []string) } -// SupportsStagingDeals is a trait for services that support staging deals -type SupportsStagingDeals interface { - SupportsStagingDeals() -} - var FullNode fullNode type fullNode struct { @@ -108,8 +103,6 @@ var StorageMiner storageMiner type storageMiner struct{} -func (storageMiner) SupportsStagingDeals() {} - func 
(storageMiner) Type() string { return "StorageMiner" } @@ -131,35 +124,6 @@ func (storageMiner) APIInfoEnvVars() (primary string, fallbacks []string, deprec return "MINER_API_INFO", nil, []string{"STORAGE_API_INFO"} } -var Markets markets - -type markets struct{} - -func (markets) SupportsStagingDeals() {} - -func (markets) Type() string { - return "Markets" -} - -func (markets) Config() interface{} { - return config.DefaultStorageMiner() -} - -func (markets) APIFlags() []string { - // support split markets-miner and monolith deployments. - return []string{"markets-api-url", "miner-api-url"} -} - -func (markets) RepoFlags() []string { - // support split markets-miner and monolith deployments. - return []string{"markets-repo", "miner-repo"} -} - -func (markets) APIInfoEnvVars() (primary string, fallbacks []string, deprecated []string) { - // support split markets-miner and monolith deployments. - return "MARKETS_API_INFO", []string{"MINER_API_INFO"}, nil -} - type worker struct { } @@ -185,30 +149,6 @@ func (worker) APIInfoEnvVars() (primary string, fallbacks []string, deprecated [ return "WORKER_API_INFO", nil, nil } -type curio struct{} - -var Curio curio - -func (curio) Type() string { - return "Curio" -} - -func (curio) Config() interface{} { - return &struct{}{} -} - -func (curio) APIFlags() []string { - return []string{"curio-api-url"} -} - -func (curio) RepoFlags() []string { - return []string{"curio-repo"} -} - -func (curio) APIInfoEnvVars() (primary string, fallbacks []string, deprecated []string) { - return "CURIO_API_INFO", nil, nil -} - var Wallet wallet type wallet struct { diff --git a/node/repo/fsrepo_ds.go b/node/repo/fsrepo_ds.go index a4415692aaf..87dd2b05241 100644 --- a/node/repo/fsrepo_ds.go +++ b/node/repo/fsrepo_ds.go @@ -5,9 +5,7 @@ import ( "os" "path/filepath" - dgbadger "github.com/dgraph-io/badger/v2" "github.com/ipfs/go-datastore" - badger "github.com/ipfs/go-ds-badger2" levelds "github.com/ipfs/go-ds-leveldb" measure 
"github.com/ipfs/go-ds-measure" ldbopts "github.com/syndtr/goleveldb/leveldb/opt" @@ -18,12 +16,15 @@ type dsCtor func(path string, readonly bool) (datastore.Batching, error) var fsDatastores = map[string]dsCtor{ "metadata": levelDs, +} - // Those need to be fast for large writes... but also need a really good GC :c - "staging": badgerDs, // miner specific +// Helper badgerDs() and its imports are unused +// Leaving here for completeness +// +/* - "client": badgerDs, // client specific -} +dgbadger "github.com/dgraph-io/badger/v2" +badger "github.com/ipfs/go-ds-badger2" func badgerDs(path string, readonly bool) (datastore.Batching, error) { opts := badger.DefaultOptions @@ -34,6 +35,8 @@ func badgerDs(path string, readonly bool) (datastore.Batching, error) { return badger.NewDatastore(path, &opts) } +*/ + func levelDs(path string, readonly bool) (datastore.Batching, error) { return levelds.NewDatastore(path, &levelds.Options{ Compression: ldbopts.NoCompression, diff --git a/node/repo/imports/manager.go b/node/repo/imports/manager.go deleted file mode 100644 index a3648b6b02a..00000000000 --- a/node/repo/imports/manager.go +++ /dev/null @@ -1,275 +0,0 @@ -package imports - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strconv" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "github.com/ipfs/go-datastore/query" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/shared" -) - -var log = logging.Logger("importmgr") - -type ID uint64 - -func (id ID) dsKey() datastore.Key { - return datastore.NewKey(fmt.Sprintf("%d", id)) -} - -type Manager struct { - ds datastore.Batching - rootDir string - counter *shared.TimeCounter -} - -type LabelKey = string -type LabelValue = string - -const ( - CAROwnerImportMgr = "importmgr" - CAROwnerUser = "user" -) - -const ( - LSource = LabelKey("source") // Function which created the import - 
LRootCid = LabelKey("root") // Root CID - LFileName = LabelKey("filename") // Local file path of the source file. - LCARPath = LabelKey("car_path") // Path of the CARv2 file containing the imported data. - LCAROwner = LabelKey("car_owner") // Owner of the CAR; "importmgr" is us; "user" or empty is them. -) - -func NewManager(ds datastore.Batching, rootDir string) *Manager { - ds = namespace.Wrap(ds, datastore.NewKey("/stores")) - ds = datastore.NewLogDatastore(ds, "storess") - - m := &Manager{ - ds: ds, - rootDir: rootDir, - counter: shared.NewTimeCounter(), - } - - log.Info("sanity checking imports") - - ids, err := m.List() - if err != nil { - log.Warnw("failed to enumerate imports on initialization", "error", err) - return m - } - - var broken int - for _, id := range ids { - log := log.With("id", id) - - info, err := m.Info(id) - if err != nil { - log.Warnw("failed to query metadata for import; skipping", "error", err) - continue - } - - log = log.With("source", info.Labels[LSource], "root", info.Labels[LRootCid], "original", info.Labels[LFileName]) - - path, ok := info.Labels[LCARPath] - if !ok { - broken++ - log.Warnw("import lacks carv2 path; import will not work; please reimport") - continue - } - - stat, err := os.Stat(path) - if err != nil { - broken++ - log.Warnw("import has missing/broken carv2; please reimport", "error", err) - continue - } - - log.Infow("import ok", "size", stat.Size()) - } - - log.Infow("sanity check completed", "broken", broken, "total", len(ids)) - - return m -} - -type Meta struct { - Labels map[LabelKey]LabelValue -} - -// CreateImport initializes a new import, returning its ID and optionally a -// CAR path where to place the data, if requested. 
-func (m *Manager) CreateImport() (id ID, err error) { - ctx := context.TODO() - id = ID(m.counter.Next()) - - meta := &Meta{Labels: map[LabelKey]LabelValue{ - LSource: "unknown", - }} - - metajson, err := json.Marshal(meta) - if err != nil { - return 0, xerrors.Errorf("marshaling store metadata: %w", err) - } - - err = m.ds.Put(ctx, id.dsKey(), metajson) - if err != nil { - return 0, xerrors.Errorf("failed to insert import metadata: %w", err) - } - - return id, err -} - -// AllocateCAR creates a new CAR allocated to the supplied import under the -// root directory. -func (m *Manager) AllocateCAR(id ID) (path string, err error) { - ctx := context.TODO() - meta, err := m.ds.Get(ctx, id.dsKey()) - if err != nil { - return "", xerrors.Errorf("getting metadata form datastore: %w", err) - } - - var sm Meta - if err := json.Unmarshal(meta, &sm); err != nil { - return "", xerrors.Errorf("unmarshaling store meta: %w", err) - } - - // refuse if a CAR path already exists. - if curr := sm.Labels[LCARPath]; curr != "" { - return "", xerrors.Errorf("import CAR already exists at %s: %w", curr, err) - } - - path = filepath.Join(m.rootDir, fmt.Sprintf("%d.car", id)) - file, err := os.Create(path) - if err != nil { - return "", xerrors.Errorf("failed to create car file for import: %w", err) - } - - // close the file before returning the path. - if err := file.Close(); err != nil { - return "", xerrors.Errorf("failed to close temp file: %w", err) - } - - // record the path and ownership. - sm.Labels[LCARPath] = path - sm.Labels[LCAROwner] = CAROwnerImportMgr - - if meta, err = json.Marshal(sm); err != nil { - return "", xerrors.Errorf("marshaling store metadata: %w", err) - } - - err = m.ds.Put(ctx, id.dsKey(), meta) - return path, err -} - -// AddLabel adds a label associated with an import, such as the source, -// car path, CID, etc. 
-func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error { - ctx := context.TODO() - meta, err := m.ds.Get(ctx, id.dsKey()) - if err != nil { - return xerrors.Errorf("getting metadata form datastore: %w", err) - } - - var sm Meta - if err := json.Unmarshal(meta, &sm); err != nil { - return xerrors.Errorf("unmarshaling store meta: %w", err) - } - - sm.Labels[key] = value - - meta, err = json.Marshal(&sm) - if err != nil { - return xerrors.Errorf("marshaling store meta: %w", err) - } - - return m.ds.Put(ctx, id.dsKey(), meta) -} - -// List returns all import IDs known by this Manager. -func (m *Manager) List() ([]ID, error) { - ctx := context.TODO() - var keys []ID - - qres, err := m.ds.Query(ctx, query.Query{KeysOnly: true}) - if err != nil { - return nil, xerrors.Errorf("query error: %w", err) - } - defer qres.Close() //nolint:errcheck - - for r := range qres.Next() { - k := r.Key - if string(k[0]) == "/" { - k = k[1:] - } - - id, err := strconv.ParseUint(k, 10, 64) - if err != nil { - return nil, xerrors.Errorf("failed to parse key %s to uint64, err=%w", r.Key, err) - } - keys = append(keys, ID(id)) - } - - return keys, nil -} - -// Info returns the metadata known to this store for the specified import ID. -func (m *Manager) Info(id ID) (*Meta, error) { - ctx := context.TODO() - - meta, err := m.ds.Get(ctx, id.dsKey()) - if err != nil { - return nil, xerrors.Errorf("getting metadata form datastore: %w", err) - } - - var sm Meta - if err := json.Unmarshal(meta, &sm); err != nil { - return nil, xerrors.Errorf("unmarshaling store meta: %w", err) - } - - return &sm, nil -} - -// Remove drops all data associated with the supplied import ID. 
-func (m *Manager) Remove(id ID) error { - ctx := context.TODO() - if err := m.ds.Delete(ctx, id.dsKey()); err != nil { - return xerrors.Errorf("removing import metadata: %w", err) - } - return nil -} - -func (m *Manager) CARPathFor(dagRoot cid.Cid) (string, error) { - ids, err := m.List() - if err != nil { - return "", xerrors.Errorf("failed to fetch import IDs: %w", err) - } - - for _, id := range ids { - info, err := m.Info(id) - if err != nil { - log.Errorf("failed to fetch info, importID=%d: %s", id, err) - continue - } - if info.Labels[LRootCid] == "" { - continue - } - c, err := cid.Parse(info.Labels[LRootCid]) - if err != nil { - log.Errorf("failed to parse root cid %s: %s", info.Labels[LRootCid], err) - continue - } - if c.Equals(dagRoot) { - return info.Labels[LCARPath], nil - } - } - - return "", nil -} diff --git a/node/repo/memrepo.go b/node/repo/memrepo.go index 6a4b416e204..d1e9b214b4a 100644 --- a/node/repo/memrepo.go +++ b/node/repo/memrepo.go @@ -107,14 +107,6 @@ func (lmem *lockedMemRepo) Path() string { panic(err) // only used in tests, probably fine } - if _, ok := lmem.t.(SupportsStagingDeals); ok { - // this is required due to the method makeDealStaging from cmd/lotus-storage-miner/init.go - // deal-staging is the directory deal files are staged in before being sealed into sectors - // for offline deal flow. 
- if err := os.MkdirAll(filepath.Join(t, "deal-staging"), 0755); err != nil { - panic(err) - } - } if lmem.t == StorageMiner || lmem.t == Worker { lmem.initSectorStore(t) } diff --git a/node/rpc.go b/node/rpc.go index 7a47d1b68ee..ede1b924cd4 100644 --- a/node/rpc.go +++ b/node/rpc.go @@ -2,8 +2,6 @@ package node import ( "context" - "encoding/json" - "fmt" "net" "net/http" _ "net/http/pprof" @@ -11,10 +9,7 @@ import ( "strconv" "time" - "github.com/google/uuid" "github.com/gorilla/mux" - "github.com/gorilla/websocket" - "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" @@ -27,12 +22,10 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v1api" - bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/lib/rpcenc" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics/proxy" "github.com/filecoin-project/lotus/node/impl" - "github.com/filecoin-project/lotus/node/impl/client" ) var rpclog = logging.Logger("rpc") @@ -98,33 +91,6 @@ func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.Server serveRpc("/rpc/v1", fnapi) serveRpc("/rpc/v0", v0) - // Import handler - handleImportFunc := handleImport(a.(*impl.FullNodeAPI)) - handleExportFunc := handleExport(a.(*impl.FullNodeAPI)) - handleRemoteStoreFunc := handleRemoteStore(a.(*impl.FullNodeAPI)) - if permissioned { - importAH := &auth.Handler{ - Verify: a.AuthVerify, - Next: handleImportFunc, - } - m.Handle("/rest/v0/import", importAH) - exportAH := &auth.Handler{ - Verify: a.AuthVerify, - Next: handleExportFunc, - } - m.Handle("/rest/v0/export", exportAH) - - storeAH := &auth.Handler{ - Verify: a.AuthVerify, - Next: handleRemoteStoreFunc, - } - m.Handle("/rest/v0/store/{uuid}", storeAH) - } else { - m.HandleFunc("/rest/v0/import", handleImportFunc) - 
m.HandleFunc("/rest/v0/export", handleExportFunc) - m.HandleFunc("/rest/v0/store/{uuid}", handleRemoteStoreFunc) - } - // debugging m.Handle("/debug/metrics", metrics.Exporter()) m.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate)) @@ -191,61 +157,6 @@ func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) { return rootMux, nil } -func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != "PUT" { - w.WriteHeader(404) - return - } - if !auth.HasPerm(r.Context(), nil, api.PermWrite) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) - return - } - - c, err := a.ClientImportLocal(r.Context(), r.Body) - if err != nil { - w.WriteHeader(500) - _ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()}) - return - } - w.WriteHeader(200) - err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c}) - if err != nil { - rpclog.Errorf("/rest/v0/import: Writing response failed: %+v", err) - return - } - } -} - -func handleExport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { - w.WriteHeader(404) - return - } - if !auth.HasPerm(r.Context(), nil, api.PermWrite) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) - return - } - - var eref api.ExportRef - if err := json.Unmarshal([]byte(r.FormValue("export")), &eref); err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - car := r.FormValue("car") == "true" - - err := a.ClientExportInto(r.Context(), eref, car, client.ExportDest{Writer: w}) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - } -} - func handleFractionOpt(name string, setter func(int)) 
http.HandlerFunc { return func(rw http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { @@ -272,34 +183,3 @@ func handleFractionOpt(name string, setter func(int)) http.HandlerFunc { setter(fr) } } - -var upgrader = websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { - return true - }, -} - -func handleRemoteStore(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - id, err := uuid.Parse(vars["uuid"]) - if err != nil { - http.Error(w, fmt.Sprintf("parse uuid: %s", err), http.StatusBadRequest) - return - } - - c, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Error(err) - w.WriteHeader(500) - return - } - - nstore := bstore.NewNetworkStoreWS(c) - if err := a.ApiBlockstoreAccessor.RegisterApiStore(id, nstore); err != nil { - log.Errorw("registering api bstore", "error", err) - _ = c.Close() - return - } - } -} diff --git a/scripts/curio.service b/scripts/curio.service deleted file mode 100644 index 967a788fa81..00000000000 --- a/scripts/curio.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Curio -After=network.target -After=lotus-daemon.service - -[Service] -ExecStart=/usr/local/bin/curio run -Environment=GOLOG_FILE="/var/log/curio/curio.log" -Environment=GOLOG_LOG_FMT="json" -LimitNOFILE=1000000 -[Install] -WantedBy=multi-user.target diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py index 14e85cf9f03..9158dc2e964 100644 --- a/scripts/generate-lotus-cli.py +++ b/scripts/generate-lotus-cli.py @@ -48,7 +48,7 @@ def get_cmd_recursively(cur_cmd): # When --help is generated one needs to make sure none of the # urfave-cli `EnvVars:` defaults get triggered # Unset everything we can find via: grep -ho 'EnvVars:.*' -r * | sort -u - for e in [ "LOTUS_PATH", "LOTUS_MARKETS_PATH", "LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH", "LOTUS_WORKER_PATH", "WORKER_PATH", "LOTUS_PANIC_REPORT_PATH", "WALLET_PATH" ]: + for e 
in [ "LOTUS_PATH", "LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH", "LOTUS_WORKER_PATH", "WORKER_PATH", "LOTUS_PANIC_REPORT_PATH", "WALLET_PATH" ]: os.environ.pop(e, None) # Set env var telling the binaries that we're generating docs @@ -58,5 +58,3 @@ def get_cmd_recursively(cur_cmd): generate_lotus_cli('lotus') generate_lotus_cli('lotus-miner') generate_lotus_cli('lotus-worker') - generate_lotus_cli('curio') - generate_lotus_cli('sptool') diff --git a/scripts/publish-checksums.sh b/scripts/publish-checksums.sh index 8e5cdfe3500..ff80f388bdc 100755 --- a/scripts/publish-checksums.sh +++ b/scripts/publish-checksums.sh @@ -9,7 +9,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then exit 1 fi -if [ "$GITHUB_REF" != refs/tags/* ]; then +if [[ "$GITHUB_REF" != refs/tags/* ]]; then echo "$GITHUB_REF is not a tag, publish failed" exit 1 fi diff --git a/scripts/version-check.sh b/scripts/version-check.sh index 20aeda4cc1f..4099c800ebb 100755 --- a/scripts/version-check.sh +++ b/scripts/version-check.sh @@ -32,7 +32,7 @@ function validate_lotus_version_matches_tag(){ _lotus_path=$1 -if [[ "$GITHUB_REF" != refs/tags/* ]]; then +if [[ "$GITHUB_REF" == refs/tags/* ]]; then validate_lotus_version_matches_tag "${_lotus_path}" "${GITHUB_REF#refs/tags/}" else echo "$GITHUB_REF is not a tag, skipping version check" diff --git a/storage/paths/db_index.go b/storage/paths/db_index.go index 79239544533..e6def455112 100644 --- a/storage/paths/db_index.go +++ b/storage/paths/db_index.go @@ -226,8 +226,8 @@ func (dbi *DBIndex) StorageAttach(ctx context.Context, si storiface.StorageInfo, // Insert storage id _, err = tx.Exec( - "INSERT INTO storage_path "+ - "Values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, NOW(), $16, $17)", + "INSERT INTO storage_path (storage_id, urls, weight, max_storage, can_seal, can_store, groups, allow_to, allow_types, deny_types, capacity, available, fs_available, reserved, used, last_heartbeat, heartbeat_err, allow_miners, deny_miners)"+ + "Values($1, $2, $3, 
$4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, NOW(), NULL, $16, $17)", si.ID, strings.Join(si.URLs, ","), si.Weight, @@ -406,7 +406,7 @@ func (dbi *DBIndex) StorageDeclareSector(ctx context.Context, storageID storifac } } else { _, err = tx.Exec( - "INSERT INTO sector_location "+ + "INSERT INTO sector_location (miner_id, sector_num, sector_filetype, storage_id, is_primary)"+ "values($1, $2, $3, $4, $5)", s.Miner, s.Number, ft, storageID, primary) if err != nil { @@ -723,7 +723,7 @@ func (dbi *DBIndex) StorageBestAlloc(ctx context.Context, allocate storiface.Sec FROM storage_path WHERE available >= $1 and NOW()-($2 * INTERVAL '1 second') < last_heartbeat - and heartbeat_err = '' + and heartbeat_err IS NULL and (($3 and can_seal = TRUE) or ($4 and can_store = TRUE)) order by (available::numeric * weight) desc`, spaceReq, diff --git a/storage/paths/interface.go b/storage/paths/interface.go index d3dce8886d4..088e2340b73 100644 --- a/storage/paths/interface.go +++ b/storage/paths/interface.go @@ -47,7 +47,7 @@ type Store interface { FsStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) - Reserve(ctx context.Context, sid storiface.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) + Reserve(ctx context.Context, sid storiface.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int, minFreePercentage float64) (func(), error) GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error) GeneratePoRepVanillaProof(ctx context.Context, sr storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error) diff --git a/storage/paths/local.go b/storage/paths/local.go index 244def37726..07223ad5317 100644 --- a/storage/paths/local.go +++ b/storage/paths/local.go @@ -36,6 
+36,8 @@ type LocalStorage interface { const MetaFile = "sectorstore.json" +const MinFreeStoragePercentage = float64(0) + type Local struct { localStorage LocalStorage index SectorIndex @@ -460,13 +462,13 @@ func (st *Local) reportStorage(ctx context.Context) { } } -func (st *Local) Reserve(ctx context.Context, sid storiface.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (release func(), err error) { - var ssize abi.SectorSize - ssize, err = sid.ProofType.SectorSize() +func (st *Local) Reserve(ctx context.Context, sid storiface.SectorRef, ft storiface.SectorFileType, + storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int, minFreePercentage float64) (func(), error) { + ssize, err := sid.ProofType.SectorSize() if err != nil { return nil, err } - release = func() {} + release := func() {} st.localLk.Lock() @@ -501,10 +503,18 @@ func (st *Local) Reserve(ctx context.Context, sid storiface.SectorRef, ft storif resvOnDisk = overhead } - if stat.Available < overhead-resvOnDisk { + overheadOnDisk := overhead - resvOnDisk + + if stat.Available < overheadOnDisk { return nil, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available)) } + freePercentag := (float64(stat.Available-overheadOnDisk) / float64(stat.Available)) * 100.0 + + if freePercentag < minFreePercentage { + return nil, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), free disk percentage %f will be lower than minimum %f", overhead, p.local, id, freePercentag, minFreePercentage)) + } + resID := sectorFile{sid.ID, fileType} log.Debugw("reserve add", "id", id, "sector", sid, "fileType", fileType, "overhead", overhead, "reserved-before", p.reserved, "reserved-after", p.reserved+overhead) @@ -523,7 +533,7 @@ func (st *Local) Reserve(ctx context.Context, sid 
storiface.SectorRef, ft storif } } - return + return release, nil } // DoubleCallWrap wraps a function to make sure it's not called twice @@ -533,7 +543,7 @@ func DoubleCallWrap(f func()) func() { curStack := make([]byte, 20480) curStack = curStack[:runtime.Stack(curStack, false)] if len(stack) > 0 { - log.Warnf("double call from:\n%s\nBut originally from:", curStack, stack) + log.Warnf("double call from:\n%s\nBut originally from:\n%s", curStack, stack) return } stack = curStack diff --git a/storage/paths/mocks/store.go b/storage/paths/mocks/store.go index 1224e6b571f..1303cf0690b 100644 --- a/storage/paths/mocks/store.go +++ b/storage/paths/mocks/store.go @@ -154,16 +154,16 @@ func (mr *MockStoreMockRecorder) RemoveCopies(arg0, arg1, arg2 interface{}) *gom } // Reserve mocks base method. -func (m *MockStore) Reserve(arg0 context.Context, arg1 storiface.SectorRef, arg2 storiface.SectorFileType, arg3 storiface.SectorPaths, arg4 map[storiface.SectorFileType]int) (func(), error) { +func (m *MockStore) Reserve(arg0 context.Context, arg1 storiface.SectorRef, arg2 storiface.SectorFileType, arg3 storiface.SectorPaths, arg4 map[storiface.SectorFileType]int, arg5 float64) (func(), error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reserve", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "Reserve", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(func()) ret1, _ := ret[1].(error) return ret0, ret1 } // Reserve indicates an expected call of Reserve. 
-func (mr *MockStoreMockRecorder) Reserve(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockStoreMockRecorder) Reserve(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reserve", reflect.TypeOf((*MockStore)(nil).Reserve), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reserve", reflect.TypeOf((*MockStore)(nil).Reserve), arg0, arg1, arg2, arg3, arg4, arg5) } diff --git a/storage/paths/remote.go b/storage/paths/remote.go index ab27548632c..2cad97537fc 100644 --- a/storage/paths/remote.go +++ b/storage/paths/remote.go @@ -177,7 +177,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s storiface.SectorRef, exist // If any path types weren't found in local storage, try fetching them // First reserve storage - releaseStorage, err := r.local.Reserve(ctx, s, toFetch, fetchIDs, overheadTable) + releaseStorage, err := r.local.Reserve(ctx, s, toFetch, fetchIDs, overheadTable, MinFreeStoragePercentage) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) } @@ -812,7 +812,7 @@ func (r *Remote) ReaderSeq(ctx context.Context, s storiface.SectorRef, ft storif return nil, xerrors.Errorf("failed to read sector %v from remote(%d): %w", s, ft, storiface.ErrSectorNotFound) } -func (r *Remote) Reserve(ctx context.Context, sid storiface.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { +func (r *Remote) Reserve(ctx context.Context, sid storiface.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int, minFreePercentage float64) (func(), error) { log.Warnf("reserve called on remote store, sectorID: %v", sid.ID) return func() { diff --git a/storage/pipeline/numassign.go b/storage/pipeline/numassign.go index 
60dddec98ea..aaeb023ed51 100644 --- a/storage/pipeline/numassign.go +++ b/storage/pipeline/numassign.go @@ -212,7 +212,7 @@ func (m *Sealing) NumReserve(ctx context.Context, name string, reserving bitfiel return m.numReserveLocked(ctx, name, reserving, force) } -// NumReserve creates a new sector reservation +// numReserveLocked creates a new sector reservation func (m *Sealing) numReserveLocked(ctx context.Context, name string, reserving bitfield.BitField, force bool) error { rk, err := reservationKey(name) if err != nil { diff --git a/storage/pipeline/piece/piece_info.go b/storage/pipeline/piece/piece_info.go index 48e15751ad0..7c34e94e7ca 100644 --- a/storage/pipeline/piece/piece_info.go +++ b/storage/pipeline/piece/piece_info.go @@ -170,6 +170,15 @@ func (ds *PieceDealInfo) String() string { } } +func (ds *PieceDealInfo) Size() abi.PaddedPieceSize { + switch { + case ds.isBuiltinMarketDeal(): + return ds.DealProposal.PieceSize + default: + return ds.PieceActivationManifest.Size + } +} + func (ds *PieceDealInfo) KeepUnsealedRequested() bool { return ds.KeepUnsealed } diff --git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go index 81ee85853c0..795cabdb34d 100644 --- a/storage/pipeline/states_sealing.go +++ b/storage/pipeline/states_sealing.go @@ -33,6 +33,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/filler" "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -88,7 +89,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes) } - fillerSizes, err := fillersFromRem(ubytes - allocated) + fillerSizes, err := filler.FillersFromRem(ubytes - allocated) if err != nil { return err } diff 
--git a/storage/pipeline/utils.go b/storage/pipeline/utils.go index ac519b6acef..373859f6dd5 100644 --- a/storage/pipeline/utils.go +++ b/storage/pipeline/utils.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "math/bits" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -18,42 +17,6 @@ import ( "github.com/filecoin-project/lotus/storage/pipeline/sealiface" ) -func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) { - // Convert to in-sector bytes for easier math: - // - // Sector size to user bytes ratio is constant, e.g. for 1024B we have 1016B - // of user-usable data. - // - // (1024/1016 = 128/127) - // - // Given that we can get sector size by simply adding 1/127 of the user - // bytes - // - // (we convert to sector bytes as they are nice round binary numbers) - - toFill := uint64(in + (in / 127)) - - // We need to fill the sector with pieces that are powers of 2. Conveniently - // computers store numbers in binary, which means we can look at 1s to get - // all the piece sizes we need to fill the sector. 
It also means that number - // of pieces is the number of 1s in the number of remaining bytes to fill - out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(toFill)) - for i := range out { - // Extract the next lowest non-zero bit - next := bits.TrailingZeros64(toFill) - psize := uint64(1) << next - // e.g: if the number is 0b010100, psize will be 0b000100 - - // set that bit to 0 by XORing it, so the next iteration looks at the - // next bit - toFill ^= psize - - // Add the piece size to the list of pieces we need to create - out[i] = abi.PaddedPieceSize(psize).Unpadded() - } - return out, nil -} - func (m *Sealing) ListSectors() ([]SectorInfo, error) { var sectors []SectorInfo if err := m.sectors.List(§ors); err != nil { diff --git a/storage/sealer/ffiwrapper/sealer_cgo.go b/storage/sealer/ffiwrapper/sealer_cgo.go index f1613033150..80f06ad0cd0 100644 --- a/storage/sealer/ffiwrapper/sealer_cgo.go +++ b/storage/sealer/ffiwrapper/sealer_cgo.go @@ -1421,6 +1421,13 @@ func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceIn return ffi.GenerateUnsealedCID(proofType, allPieces) } +func (sb *Sealer) GenerateSingleVanillaProof( + replica ffi.PrivateSectorInfo, + challenges []uint64, +) ([]byte, error) { + return ffi.GenerateSingleVanillaProof(replica, challenges) +} + func (sb *Sealer) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, vanillas [][]byte) ([]proof.PoStProof, error) { return ffi.GenerateWinningPoStWithVanilla(proofType, minerID, randomness, vanillas) } diff --git a/storage/sealer/mock/mock.go b/storage/sealer/mock/mock.go index e33be847715..958a246a74e 100644 --- a/storage/sealer/mock/mock.go +++ b/storage/sealer/mock/mock.go @@ -13,7 +13,6 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/dagstore/mount" commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" commcid 
"github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" @@ -435,7 +434,7 @@ func (mgr *SectorMgr) GenerateWindowPoStWithVanilla(ctx context.Context, proofTy panic("implement me") } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (storiface.Reader, bool, error) { off := storiface.UnpaddedByteIndex(0) var piece cid.Cid diff --git a/storage/sealer/piece_provider.go b/storage/sealer/piece_provider.go index 0e992b67918..3d177665acf 100644 --- a/storage/sealer/piece_provider.go +++ b/storage/sealer/piece_provider.go @@ -10,7 +10,6 @@ import ( pool "github.com/libp2p/go-buffer-pool" "golang.org/x/xerrors" - "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/storage/paths" @@ -29,7 +28,7 @@ type PieceProvider interface { // default in most cases, but this might matter with future PoRep) // startOffset is added to the pieceOffset to get the starting reader offset. 
// The number of bytes that can be read is pieceSize-startOffset - ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) + ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (storiface.Reader, bool, error) IsUnsealed(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) } @@ -73,7 +72,7 @@ func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storiface.SectorR // It will NOT try to schedule an Unseal of a sealed sector file for the read. // // Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers. -func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize) (mount.Reader, error) { +func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize) (storiface.Reader, error) { // acquire a lock purely for reading unsealed sectors ctx, cancel := context.WithCancel(ctx) if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { @@ -169,7 +168,7 @@ var _ io.Closer = funcCloser(nil) // If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal, // the returned boolean parameter will be set to true. // If we have an existing unsealed file containing the given piece, the returned boolean will be set to false. 
-func (p *pieceProvider) ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { +func (p *pieceProvider) ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (storiface.Reader, bool, error) { if err := pieceOffset.Valid(); err != nil { return nil, false, xerrors.Errorf("pieceOffset is not valid: %w", err) } @@ -224,3 +223,5 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storiface.SectorRe return r, uns, nil } + +var _ storiface.Reader = &pieceReader{} diff --git a/storage/sealer/piece_reader.go b/storage/sealer/piece_reader.go index 7a7cd184110..37fb4488c9c 100644 --- a/storage/sealer/piece_reader.go +++ b/storage/sealer/piece_reader.go @@ -12,7 +12,6 @@ import ( "go.opencensus.io/tag" "golang.org/x/xerrors" - "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/metrics" @@ -303,5 +302,3 @@ func (p *pieceReader) readInto(b []byte, off int64) (n int, err error) { return n, cerr } - -var _ mount.Reader = (*pieceReader)(nil) diff --git a/storage/sealer/proofpaths/cachefiles.go b/storage/sealer/proofpaths/cachefiles.go index 628ab158565..cbb6839ce95 100644 --- a/storage/sealer/proofpaths/cachefiles.go +++ b/storage/sealer/proofpaths/cachefiles.go @@ -53,3 +53,11 @@ func SDRLayers(spt abi.RegisteredSealProof) (int, error) { return 0, fmt.Errorf("unsupported proof type: %v", spt) } } + +func IsTreeRCFile(baseName string) bool { + return IsFileTreeRLast(baseName) || IsFileTreeC(baseName) +} + +func IsTreeDFile(baseName string) bool { + return IsFileTreeD(baseName) +} diff --git a/storage/sealer/storiface/storage.go b/storage/sealer/storiface/storage.go index 143c3b5d560..91ab12805ca 100644 --- 
a/storage/sealer/storiface/storage.go +++ b/storage/sealer/storiface/storage.go @@ -13,6 +13,16 @@ import ( type Data = io.Reader +// Reader is a fully-featured Reader. It is the +// union of the standard IO sequential access method (Read), with seeking +// ability (Seek), as well random access (ReadAt). +type Reader interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker +} + type SectorRef struct { ID abi.SectorID ProofType abi.RegisteredSealProof diff --git a/storage/sealer/worker_local.go b/storage/sealer/worker_local.go index 417a15e62b1..82f939b1040 100644 --- a/storage/sealer/worker_local.go +++ b/storage/sealer/worker_local.go @@ -150,19 +150,19 @@ type localWorkerPathProvider struct { } func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { - paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing, l.op) + spaths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing, l.op) if err != nil { return storiface.SectorPaths{}, nil, err } - releaseStorage, err := l.w.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal) + releaseStorage, err := l.w.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal, paths.MinFreeStoragePercentage) if err != nil { return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) } - log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) + log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, spaths) - return paths, func() { + return spaths, func() { releaseStorage() for _, fileType := range storiface.PathTypes { @@ -675,6 +675,8 @@ func (l *LocalWorker) GenerateWindowPoStAdv(ctx context.Context, ppt abi.Registe go func(i int, s 
storiface.PostSectorChallenge) { defer wg.Done() + ctx := ctx + defer func() { if l.challengeThrottle != nil { <-l.challengeThrottle