diff --git a/.cargo/config.toml b/.cargo/config.toml
index 330d8612907..1368e393fe4 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -56,6 +56,9 @@ rustflags = [
     "-Wmissing_docs",
 
     # TODOs:
+    # Fix this lint eventually.
+    "-Aclippy::result_large_err",
+
     # `cargo fix` might help do these fixes,
     # or add a config.toml to sub-directories which should allow these lints,
     # or try allowing the lint in the specific module (lib.rs doesn't seem to work in some cases)
@@ -80,7 +83,7 @@ rustflags = [
 [build]
 rustdocflags = [
     # The -A and -W settings must be the same as the `RUSTDOCFLAGS` in:
-    # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/docs.yml#L61
+    # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/docs-deploy-firebase.yml#L68
 
     # Links in public docs can point to private items.
     "-Arustdoc::private_intra_doc_links",
diff --git a/.codespellrc b/.codespellrc
index abf12fb0d05..2d242dca9ff 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -1,3 +1,3 @@
 [codespell]
-ignore-words-list=crate,Sur,inout
+ignore-words-list=crate,Sur,inout,Groth,groth,re-use,
 exclude-file=book/mermaid.min.js
diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md
index 1dab363d981..0e2ee30b7b1 100644
--- a/.github/ISSUE_TEMPLATE/release.md
+++ b/.github/ISSUE_TEMPLATE/release.md
@@ -16,7 +16,7 @@ They can be skipped for urgent releases.
 To check consensus correctness, we want to test that the state format is valid after a full sync.
 (Format upgrades are tested in CI on each PR.)
 
-- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml?query=event%3Aschedule) since the last state change, or
+- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=event%3Aschedule) since the last state change, or
 - [ ] Start a manual workflow run with a Zebra and `lightwalletd` full sync.
 
 State format changes can be made in `zebra-state` or `zebra-chain`. The state format can be changed by data that is sent to the state, data created within the state using `zebra-chain`, or serialization formats in `zebra-state` or `zebra-chain`.
diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md
index 48efec453f5..5445834df3e 100644
--- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md
+++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md
@@ -9,7 +9,7 @@ assignees: ''
 
 # Prepare for the Release
 
-- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml?query=event%3Aschedule) since the last state change, or start a manual full sync.
+- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=event%3Aschedule) since the last state change, or start a manual full sync.
 - [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged.
   (See the release ticket checklist for details)
@@ -83,13 +83,22 @@ Check that the release will work:
 - [ ] Update crate versions, commit the changes to the release branch, and do a release dry-run:
 
 ```sh
-cargo release version --verbose --execute --allow-branch '*' --workspace --exclude zebrad beta
+# Update everything except for alpha crates and zebrad:
+cargo release version --verbose --execute --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc beta
+# Due to a bug in cargo-release, we need to pass exact versions for alpha crates:
+cargo release version --verbose --execute --allow-branch '*' --package zebra-scan 0.1.0-alpha.4
+cargo release version --verbose --execute --allow-branch '*' --package zebra-grpc 0.1.0-alpha.2
+# Update zebrad:
 cargo release version --verbose --execute --allow-branch '*' --package zebrad patch # [ major | minor | patch ]
+# Continue with the release process:
 cargo release replace --verbose --execute --allow-branch '*' --package zebrad
 cargo release commit --verbose --execute --allow-branch '*'
 ```
 
-Crate publishing is [automatically checked in CI](https://github.com/ZcashFoundation/zebra/actions/workflows/release-crates-io.yml) using "dry run" mode.
+Crate publishing is [automatically checked in CI](https://github.com/ZcashFoundation/zebra/actions/workflows/release-crates-io.yml) using "dry run" mode. However, due to a bug in `cargo-release`, we need to pass exact versions for the alpha crates:
+
+- [ ] Update `zebra-scan` and `zebra-grpc` alpha crates in the [release-crates-dry-run workflow script](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/scripts/release-crates-dry-run.sh)
+- [ ] Push the above version changes to the release branch.
 
 ## Update End of Support
 
@@ -131,8 +140,10 @@ The end of support height is calculated from the current blockchain height:
 
 ## Test the Pre-Release
 
-- [ ] Wait until the [Docker binaries have been built on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml), and the quick tests have passed.
-- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml)
+- [ ] Wait until the Docker binaries have been built on `main`, and the quick tests have passed:
+  - [ ] [ci-unit-tests-docker.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-unit-tests-docker.yml?query=branch%3Amain)
+  - [ ] [ci-integration-tests-gcp.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=branch%3Amain)
+- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml?query=event%3Arelease)
 
 ## Publish Release
 
@@ -148,7 +159,7 @@ The end of support height is calculated from the current blockchain height:
   and put the output in a comment on the PR.
 
 ## Publish Docker Images
-- [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml).
+- [ ] Wait for the [Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml?query=event%3Arelease).
 - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify.
 - [ ] Remove `do-not-merge` from the PRs you added it to
diff --git a/.github/PULL_REQUEST_TEMPLATE/rfc.md b/.github/PULL_REQUEST_TEMPLATE/rfc.md
deleted file mode 100644
index 3502f132623..00000000000
--- a/.github/PULL_REQUEST_TEMPLATE/rfc.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-name: RFC Template
-about: A design RFC for Zebra
-title: ''
-labels: C-design
-assignees: ''
----
-
-
-
-## RFC Summary
-
-**Please copy the RFC summary here.**
-
-### Context
-
-**Please copy the RFC header here.**
-
-Feature Name: `my_feature`
-
-Start Date: YYYY-MM-DD
-
-Design PR: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/pull/0000)
-
-Zebra Issue: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/issues/0000)
-
-### Rendered
-
-
-[Rendered](https://github.com/ZcashFoundation/zebra/blob/my-branch-name/book/src/dev/rfcs/drafts/xxxx-my-feature.md).
-
-## Zebra Team Approval
-
-Most of the Zebra team should review design RFCs:
-
-- [ ] @conradoplg
-- [ ] @dconnolly
-- [ ] @oxarbitrage
-- [ ] @jvff
-- [ ] @mpguerra
-- [ ] @teor2345
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index d229bd8eedb..314ec41eddc 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,5 +1,6 @@
 version: 2
 updates:
+  # Rust section
   - package-ecosystem: cargo
    directory: '/'
    # serde, clap, and other dependencies sometimes have multiple updates in a week
@@ -18,110 +19,14 @@ updates:
      ecc:
        patterns:
          # deliberately include zcash_script (even though it is maintained by ZF)
-          - "zcash_*"
-          - "orchard"
-          - "halo2*"
-          - "incrementalmerkletree"
-          - "bridgetree"
-          - "equihash"
-          # addresses
-          - "bs58"
-          - "ripemd"
-      # groups are limited to 10 items
-      crypto:
-        patterns:
-          - "bellman"
-          # reddsa, redjubjub
-          - "red*"
-          - "jubjub"
-          - "group"
-          - "bls12_381"
-          - "blake*"
-          - "secp256k1"
-          - "sha2"
-          - "*25519*"
-          - "rand*"
-      async:
-        patterns:
-          - "tokio*"
-          - "console-subscriber"
-          - "tower*"
-          - "hyper*"
-          - "h2"
-          - "reqwest"
-          - "futures*"
-          - "pin-project*"
-      log-time:
-        patterns:
-          - "tracing*"
-          - "log"
-          - "*eyre*"
-          - "thiserror"
-          # displaydoc, spandoc
-          - "*doc"
-          - "owo-colors"
-          - "sentry*"
-          - "metrics*"
-          # time, humantime
-          - "*time*"
-          - "chrono*"
-      concurrency:
-        patterns:
-          - "once_cell"
-          - "lazy_static"
-          - "rayon*"
-          - "crossbeam*"
-          - "num_cpus"
-      progress-bar:
-        patterns:
-          - "indicatif"
-          - "howudoin"
-      app:
-        patterns:
-          - "abscissa*"
-          - "structopt*"
-          - "clap*"
-          - "atty*"
-          - "semver*"
-          # dirs, directories, directories-next
-          - "dir*"
-          - "vergen"
-          - "*git*"
-          - "toml*"
-          - "rlimit"
-      formats:
-        patterns:
-          - "serde*"
-          - "jsonrpc*"
-          - "hex*"
-          - "regex"
-          - "byteorder"
-          - "bytes"
-          - "bincode"
-      data-structures:
-        patterns:
-          - "bitflags*"
-          - "bitvec"
-          - "indexmap"
-          - "num-integer"
-          - "primitive-types"
-          - "uint"
-          - "tinyvec"
-          - "itertools"
-          - "ordered-map"
-          - "mset"
-      test:
-        patterns:
-          - "proptest*"
-          - "insta"
-          - "prost*"
-          - "tonic*"
-          # depends on tonic and prost
-          - "console-subscriber"
-          - "tempfile"
-          - "static_assertions"
-          - "criterion"
-          - "inferno"
+          - "zcash_*|orchard|halo2*|incrementalmerkletree|bridgetree|equihash"
+      prod:
+        dependency-type: "production"
+        exclude-patterns:
+          - "zcash_*|orchard|halo2*|incrementalmerkletree|bridgetree|equihash"
+      dev:
+        dependency-type: "development"
+  # Devops section
   - package-ecosystem: github-actions
    directory: '/'
    schedule:
@@ -135,3 +40,7 @@ updates:
      - 'A-devops'
      - 'A-dependencies'
      - 'P-Low :snowflake:'
+    groups:
+      devops:
+        patterns:
+          - "*"
\ No newline at end of file
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index e6d94a61736..f8ab545c658 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,34 +1,55 @@
 ## Motivation
 
-
+_What are the most important goals of the ticket or PR?_
+
+
+### PR Author Checklist
+
+#### Check before marking the PR as ready for review:
+ - [ ] Will the PR name make sense to users?
+ - [ ] Does the PR have a priority label?
+ - [ ] Have you added or updated tests?
+ - [ ] Is the documentation up to date?
+
+##### For significant changes:
+ - [ ] Is there a summary in the CHANGELOG?
+ - [ ] Can these changes be split into multiple PRs?
+
+_If a checkbox isn't relevant to the PR, mark it as done._
 
 ### Specifications
 
+
 ### Complex Code or Requirements
 
+
 ## Solution
 
+
+### Testing
+
+
+
+
 ## Review
 
+
 ### Reviewer Checklist
 
- - [ ] Will the PR name make sense to users?
- - [ ] Does it need extra CHANGELOG info? (new features, breaking changes, large changes)
- - [ ] Are the PR labels correct?
- - [ ] Does the code do what the ticket and PR says?
- - [ ] Does it change concurrent code, unsafe code, or consensus rules?
- - [ ] How do you know it works? Does it have tests?
+Check before approving the PR:
+ - [ ] Does the PR scope match the ticket?
+ - [ ] Are there enough tests to make sure it works? Do the tests cover the PR motivation?
+ - [ ] Are all the PR blockers dealt with?
+       PR blockers can be dealt with in new tickets or PRs.
+
+_And check the PR Author checklist is complete._
 
 ## Follow Up Work
diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml
new file mode 100644
index 00000000000..5c1e05a56e2
--- /dev/null
+++ b/.github/workflows/cd-deploy-nodes-gcp.patch-external.yml
@@ -0,0 +1,38 @@
+# Workflow patches for skipping Google Cloud CD deployments on PRs from external repositories.
+name: Deploy Nodes to GCP
+
+# Run on PRs from external repositories, let them pass, and then Mergify will check them.
+# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each
+# job.
+on:
+  pull_request:
+
+# IMPORTANT
+#
+# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and
+# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync.
+jobs:
+  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
+  build:
+    name: Build CD Docker / Build images
+    # Only run on PRs from external repositories, skipping ZF branches and tags.
+    if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }}
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-configuration-file:
+    name: Test CD default Docker config file / Test default-conf in Docker
+    # This dependency allows all these jobs to depend on a single condition, making it easier to
+    # change.
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-zebra-conf-path:
+    name: Test CD custom Docker config file / Test custom-conf in Docker
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml b/.github/workflows/cd-deploy-nodes-gcp.patch.yml
index 457bedc2a3b..b1994104329 100644
--- a/.github/workflows/cd-deploy-nodes-gcp.patch.yml
+++ b/.github/workflows/cd-deploy-nodes-gcp.patch.yml
@@ -1,5 +1,8 @@
+# Workflow patches for skipping Google Cloud CD deployments, when Rust code or dependencies aren't
+# modified in a PR.
 name: Deploy Nodes to GCP
 
+# Run on PRs with unmodified code and dependency files.
 on:
   pull_request:
     paths-ignore:
@@ -19,7 +22,12 @@ on:
      - '.github/workflows/cd-deploy-nodes-gcp.yml'
      - '.github/workflows/sub-build-docker-image.yml'
 
+# IMPORTANT
+#
+# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and
+# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync.
 jobs:
+  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
   build:
     name: Build CD Docker / Build images
     runs-on: ubuntu-latest
@@ -36,4 +44,4 @@ jobs:
     name: Test CD custom Docker config file / Test custom-conf in Docker
     runs-on: ubuntu-latest
     steps:
-      - run: 'echo "No build required"'
\ No newline at end of file
+      - run: 'echo "No build required"'
diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml
index bf6b9cb5842..70caeb13ac0 100644
--- a/.github/workflows/cd-deploy-nodes-gcp.yml
+++ b/.github/workflows/cd-deploy-nodes-gcp.yml
@@ -1,3 +1,16 @@
+# Google Cloud node deployments and tests that run when Rust code or dependencies are modified,
+# but only on PRs from the ZcashFoundation/zebra repository.
+# (External PRs are tested/deployed by mergify.)
+#
+# 1. `versioning`: Extracts the major version from the release semver. Useful for segregating instances based on major versions.
+# 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git.
+# 3. `test-configuration-file`: Validates Zebra using the default config with the latest version.
+# 4. `test-configuration-file-testnet`: Tests the Docker image for the testnet configuration.
+# 5. `test-zebra-conf-path`: Verifies Zebra with a custom Docker config file.
+# 6. `deploy-nodes`: Deploys Managed Instance Groups (MiGs) for Mainnet and Testnet. If triggered by main branch pushes, it always replaces the MiG. For releases, MiGs are replaced only if deploying the same major version; otherwise, a new major version is deployed.
+# 7. `deploy-instance`: Deploys a single node in a specified GCP zone for testing specific commits. Instances from this job aren't auto-replaced or deleted.
+#
+# The overall goal is to ensure that Zebra nodes are consistently deployed, tested, and managed on GCP.
 name: Deploy Nodes to GCP
 
 # Ensures that only one workflow task will run at a time. Previous deployments, if
@@ -31,6 +44,7 @@ on:
 
   # TODO: Temporarily disabled to reduce network load, see #6894.
   #push:
+  #  # Skip main branch updates where Rust code and dependencies aren't modified.
   #  branches:
   #    - main
   #  paths:
@@ -52,6 +66,7 @@ on:
 
   # Only runs the Docker image tests, doesn't deploy any instances
   pull_request:
+    # Skip PRs where Rust code and dependencies aren't modified.
     paths:
      # code and tests
      - '**/*.rs'
@@ -73,6 +88,10 @@ on:
     types:
      - published
 
+# IMPORTANT
+#
+# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and
+# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync.
 jobs:
   # If a release was made we want to extract the first part of the semver from the
   # tag_name
@@ -92,7 +111,7 @@ jobs:
     steps:
      - name: Getting Zebrad Version
        id: get
-        uses: actions/github-script@v6.4.1
+        uses: actions/github-script@v7.0.1
        with:
          result-encoding: string
          script: |
@@ -107,6 +126,9 @@ jobs:
   # The image will be commonly named `zebrad:`
   build:
     name: Build CD Docker
+    # Skip PRs from external repositories, let them pass, and then Mergify will check them.
+    # This workflow also runs on release tags, the event name check will run it on releases.
+    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
     uses: ./.github/workflows/sub-build-docker-image.yml
     with:
      dockerfile_path: ./docker/Dockerfile
@@ -148,7 +170,7 @@ jobs:
     with:
      test_id: 'custom-conf'
      docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }}
-      grep_patterns: '-e "v1.0.0-rc.2.toml"'
+      grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"'
      test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"'
      network: ${{ inputs.network || vars.ZCASH_NETWORK }}
@@ -204,14 +226,14 @@ jobs:
      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
-        uses: google-github-actions/auth@v1.1.1
+        uses: google-github-actions/auth@v2.1.1
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
 
      - name: Set up Cloud SDK
-        uses: google-github-actions/setup-gcloud@v1.1.1
+        uses: google-github-actions/setup-gcloud@v2.1.0
 
      # TODO we should implement the fixes from https://github.com/ZcashFoundation/zebra/pull/5670 here
      # but the implementation is failing as it's requiring the disk names, contrary to what is stated in the official documentation
@@ -306,14 +328,14 @@ jobs:
      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
-        uses: google-github-actions/auth@v1.1.1
+        uses: google-github-actions/auth@v2.1.1
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
 
      - name: Set up Cloud SDK
-        uses: google-github-actions/setup-gcloud@v1.1.1
+        uses: google-github-actions/setup-gcloud@v2.1.0
 
      # Create instance template from container image
      - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad
diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml
index 19ed00db56d..a623037059b 100644
--- a/.github/workflows/chore-delete-gcp-resources.yml
+++ b/.github/workflows/chore-delete-gcp-resources.yml
@@ -1,4 +1,13 @@
-# TODO: rename this action name and filename to Delete infra resources
+# This workflow is designed to delete old Google Cloud Platform (GCP) resources to save on costs.
+#
+# 1. Deletes specific instances in GCP older than a defined number of days.
+# 2. Deletes instance templates older than a set number of days.
+# 3. Deletes older disks not currently in use, with certain ones prefixed by commit hashes or "zebrad-".
+# 4. Deletes cache images from GCP, retaining a specified number of the latest images for certain types like zebrad checkpoint cache, zebrad tip cache, and lightwalletd + zebrad tip cache.
+# 5. Deletes unused artifacts from Google Artifact Registry older than a defined number of hours while retaining the latest few.
+#
+# It uses the gcloud CLI for most of its operations and also leverages specific GitHub Actions like the gcr-cleaner for deleting old images from the Google Artifact Registry.
+# The workflow is scheduled to run daily at 0700 UTC.
 name: Delete GCP resources
 
 on:
@@ -37,14 +46,14 @@ jobs:
      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
-        uses: google-github-actions/auth@v1.1.1
+        uses: google-github-actions/auth@v2.1.1
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
 
      - name: Set up Cloud SDK
-        uses: google-github-actions/setup-gcloud@v1.1.1
+        uses: google-github-actions/setup-gcloud@v2.1.0
 
      # Deletes all mainnet and testnet instances older than $DELETE_INSTANCE_DAYS days.
      #
@@ -56,29 +65,11 @@
      # so it can't be shell-quoted.
      - name: Delete old instances
        run: |
-          DELETE_BEFORE_DATE=$(date --date="$DELETE_INSTANCE_DAYS days ago" '+%Y%m%d')
-
-          IFS=$'\n'
-          INSTANCES=$(gcloud compute instances list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,ZONE)' | \
-          sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/')
-
-          for INSTANCE_AND_ZONE in $INSTANCES
-          do
-            IFS=$' '
-            gcloud compute instances delete --verbosity=info ${INSTANCE_AND_ZONE} --delete-disks=all || continue
-            IFS=$'\n'
-          done
-
+          ./.github/workflows/scripts/gcp-delete-old-instances.sh
 
      # Deletes all the instance templates older than $DELETE_AGE_DAYS days.
      - name: Delete old instance templates
        run: |
-          DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d')
-          TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
-
-          for TEMPLATE in $TEMPLATES
-          do
-            gcloud compute instance-templates delete "${TEMPLATE}" || continue
-          done
+          ./.github/workflows/scripts/gcp-delete-old-templates.sh
 
      # Deletes all mainnet and testnet disks older than $DELETE_AGE_DAYS days.
      #
@@ -89,31 +80,7 @@
      # so it can't be shell-quoted.
      - name: Delete old disks
        run: |
-          DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d')
-
-          IFS=$'\n'
-          # Disks created by PR jobs, and other jobs that use a commit hash
-          COMMIT_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | \
-          sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/')
-
-          for DISK_AND_LOCATION in $COMMIT_DISKS
-          do
-            IFS=$' '
-            gcloud compute disks delete --verbosity=info ${DISK_AND_LOCATION} || continue
-            IFS=$'\n'
-          done
-
-          IFS=$'\n'
-          # Disks created by managed instance groups, and other jobs that start with "zebrad-"
-          ZEBRAD_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~^zebrad- AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | \
-          sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/')
-
-          for DISK_AND_LOCATION in $ZEBRAD_DISKS
-          do
-            IFS=$' '
-            gcloud compute disks delete --verbosity=info ${DISK_AND_LOCATION} || continue
-            IFS=$'\n'
-          done
+          ./.github/workflows/scripts/gcp-delete-old-disks.sh
 
      # Deletes mainnet and testnet cache images older than $DELETE_AGE_DAYS days.
      #
@@ -125,108 +92,9 @@
      #
      # TODO:
      # - refactor out repeated shell script code
-      - name: Delete old cache disks
+      - name: Delete old cache images
        run: |
-          DELETE_BEFORE_DATE=$(date --date="$DELETE_AGE_DAYS days ago" '+%Y%m%d')
-
-          # As of April 2023, these disk names look like:
-          # zebrad-cache-6039-merge-62c8ecc-v25-mainnet-checkpoint-053559
-          #
-          # Mainnet zebrad checkpoint
-          ZEBRAD_MAINNET_CHECKPOINT_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-mainnet-checkpoint AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
-          KEPT_IMAGES=0
-          for IMAGE in $ZEBRAD_MAINNET_CHECKPOINT_IMAGES
-          do
-            if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
-            then
-              KEPT_IMAGES=$((KEPT_IMAGES+1))
-              echo "Keeping image $KEPT_IMAGES named $IMAGE"
-              continue
-            fi
-
-            gcloud compute images delete "${IMAGE}" || continue
-          done
-
-          # Testnet zebrad checkpoint
-          ZEBRAD_TESTNET_CHECKPOINT_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-testnet-checkpoint AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
-          KEPT_IMAGES=0
-          for IMAGE in $ZEBRAD_TESTNET_CHECKPOINT_IMAGES
-          do
-            if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
-            then
-              KEPT_IMAGES=$((KEPT_IMAGES+1))
-              echo "Keeping image $KEPT_IMAGES named $IMAGE"
-              continue
-            fi
-
-            gcloud compute images delete "${IMAGE}" || continue
-          done
-
-          # As of April 2023, these disk names look like:
-          # zebrad-cache-6556-merge-a2ca4de-v25-mainnet-tip(-u)?-140654
-          #
-          # Mainnet zebrad tip
-          ZEBRAD_MAINNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-mainnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
-          KEPT_IMAGES=0
-          for IMAGE in $ZEBRAD_MAINNET_TIP_IMAGES
-          do
-            if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
-            then
-              KEPT_IMAGES=$((KEPT_IMAGES+1))
-              echo "Keeping image $KEPT_IMAGES named $IMAGE"
-              continue
-            fi
-
-            gcloud compute images delete "${IMAGE}" || continue
-          done
-
-          # Testnet zebrad tip
-          ZEBRAD_TESTNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^zebrad-cache-.*-testnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
-          KEPT_IMAGES=0
-          for IMAGE in $ZEBRAD_TESTNET_TIP_IMAGES
-          do
-            if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
-            then
-              KEPT_IMAGES=$((KEPT_IMAGES+1))
-              echo "Keeping image $KEPT_IMAGES named $IMAGE"
-              continue
-            fi
-
-            gcloud compute images delete "${IMAGE}" || continue
-          done
-
-          # As of April 2023, these disk names look like:
-          # lwd-cache-main-fb3fec0-v25-mainnet-tip(-u)?-061314
-          #
-          # Mainnet lightwalletd tip
-          LWD_MAINNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^lwd-cache-.*-mainnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
-          KEPT_IMAGES=0
-          for IMAGE in $LWD_MAINNET_TIP_IMAGES
-          do
-            if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
-            then
-              KEPT_IMAGES=$((KEPT_IMAGES+1))
-              echo "Keeping image $KEPT_IMAGES named $IMAGE"
-              continue
-            fi
-
-            gcloud compute images delete "${IMAGE}" || continue
-          done
-
-          # Testnet lightwalletd tip
-          LWD_TESTNET_TIP_IMAGES=$(gcloud compute images list --sort-by=~creationTimestamp --filter="name~^lwd-cache-.*-testnet-tip AND creationTimestamp < $DELETE_BEFORE_DATE" --format='value(NAME)')
-          KEPT_IMAGES=0
-          for IMAGE in $LWD_TESTNET_TIP_IMAGES
-          do
-            if [[ "$KEPT_IMAGES" -lt "$KEEP_LATEST_IMAGE_COUNT" ]];
-            then
-              KEPT_IMAGES=$((KEPT_IMAGES+1))
-              echo "Keeping image $KEPT_IMAGES named $IMAGE"
-              continue
-            fi
-
-            gcloud compute images delete "${IMAGE}" || continue
-          done
+          ./.github/workflows/scripts/gcp-delete-old-cache-images.sh
 
   # We're using a generic approach here, which allows multiple registries to be included,
   # even those not related to GCP. Enough reason to create a separate job.
@@ -246,7 +114,7 @@ jobs:
      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
-        uses: google-github-actions/auth@v1.1.1
+        uses: google-github-actions/auth@v2.1.1
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
diff --git a/.github/workflows/chore-project-management.yml b/.github/workflows/chore-project-management.yml
index b9c4e1a8bd8..4825e353c94 100644
--- a/.github/workflows/chore-project-management.yml
+++ b/.github/workflows/chore-project-management.yml
@@ -1,6 +1,12 @@
+# This workflow manages the automatic addition of new issues to specific GitHub projects.
+#
+# 1. Newly opened issues are added to the "Zebra Backlog" GitHub project.
+# 2. They are also added to the "ZF Engineering Backlog" GitHub project.
+#
+# The action makes use of the `add-to-project` action and requires a GitHub token
+# (currently sourced from secrets) to authenticate and perform the addition.
 name: Add new issues to GitHub projects
 
-# Configuration for automatically adding issues to various Github projects for Project Management purposes
 on:
   issues:
     types:
diff --git a/.github/workflows/ci-build-crates.patch.yml b/.github/workflows/ci-build-crates.patch.yml
index e173752dd06..58d8f8210d1 100644
--- a/.github/workflows/ci-build-crates.patch.yml
+++ b/.github/workflows/ci-build-crates.patch.yml
@@ -33,11 +33,11 @@ jobs:
      # This step is meant to dynamically create a JSON containing the values of each crate
      # available in this repo in the root directory. We use `cargo tree` to accomplish this task.
      #
-      # The result from `cargo tree` is then transform to JSON values between double quotes, 
+      # The result from `cargo tree` is then transformed to JSON values between double quotes,
      # and separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable.
      #
      # A JSON object is created and assigned to a $MATRIX variable, which is use to create an output
-      # named `matrix`, which is then used as the input in following steps, 
+      # named `matrix`, which is then used as the input in following steps,
      # using ` ${{ fromJson(needs.matrix.outputs.matrix) }}`
      - id: set-matrix
        name: Dynamically build crates JSON
diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml
index 77ea4b44e2c..5347eee6b29 100644
--- a/.github/workflows/ci-build-crates.yml
+++ b/.github/workflows/ci-build-crates.yml
@@ -1,3 +1,13 @@
+# This workflow facilitates the individual building of Rust crates present in the repository.
+# 1. A matrix is generated dynamically to identify each crate in the repository.
+# 2. This matrix is checked for validity.
+# 3. Each identified crate undergoes three build processes:
+#    - With no features.
+#    - With the default features.
+#    - With all the features enabled.
+# 4. In case of build failures outside of pull requests, an issue is either opened or updated
+#    in the repository to report the failure.
+# Throughout the workflow, various setup steps ensure the correct environment and tools are present.
 name: Build crates individually
 
 # Ensures that only one workflow task will run at a time. Previous builds, if
@@ -61,17 +71,18 @@ jobs:
      # This step is meant to dynamically create a JSON containing the values of each crate
      # available in this repo in the root directory. We use `cargo tree` to accomplish this task.
      #
-      # The result from `cargo tree` is then transform to JSON values between double quotes,
-      # and separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable.
+      # The result from `cargo tree` is then sorted so the longest job (zebrad) runs first,
+      # transformed to JSON values between double quotes, and separated by commas,
+      # then added to a `crates.txt`.
      #
-      # A JSON object is created and assigned to a $MATRIX variable, which is use to create an output
-      # named `matrix`, which is then used as the input in following steps,
+      # A JSON object is created and assigned to a $MATRIX variable, which is used to create an
+      # output named `matrix`, which is then used as the input in following steps,
      # using ` ${{ fromJson(needs.matrix.outputs.matrix) }}`
      - id: set-matrix
        name: Dynamically build crates JSON
        run: |
          TEMP_DIR=$(mktemp -d)
-          echo "$(cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//')" > $TEMP_DIR/crates.txt
+          cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | LC_ALL=C sort --reverse | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//' > $TEMP_DIR/crates.txt
          MATRIX=$( (
            echo '{ "crate" : ['
            echo "$(cat $TEMP_DIR/crates.txt)"
@@ -101,10 +112,12 @@ jobs:
     name: Build ${{ matrix.crate }} crate
     timeout-minutes: 90
     needs: [ matrix, check-matrix ]
-    runs-on: ubuntu-latest
+    # Some of these builds take more than 14GB disk space
+    runs-on: ubuntu-latest-m
     strategy:
-      # avoid rate-limit errors by only launching a few of these jobs at a time
-      max-parallel: 2
+      # avoid rate-limit errors by only launching a few of these jobs at a time,
+      # but still finish in a similar time to the longest tests
+      max-parallel: 4
      fail-fast: true
      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}
 
@@ -115,7 +128,7 @@ jobs:
      - uses: r7kamura/rust-problem-matchers@v1.4.0
 
      - name: Install last version of Protoc
-        uses: arduino/setup-protoc@v2.1.0
+        uses: arduino/setup-protoc@v3.0.0
        with:
          # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed
          version: '23.x'
@@ -126,21 +139,30 @@ jobs:
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal
 
-      # We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument, 
-      # but it's faster to run these commands sequentially, so they can re-use the local cargo cache. 
-      # 
-      # Some Zebra crates do not have any features, and most don't have any default features. 
-      - name: Build ${{ matrix.crate }} crate with no default features
+      # We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument,
+      # but it's faster to run these commands sequentially, so they can re-use the local cargo cache.
+      #
+      # Some Zebra crates do not have any features, and most don't have any default features.
+      # Some targets activate features, but we still need to be able to build without them.
+      - name: Build ${{ matrix.crate }} crate with default features
+        run: |
+          cargo clippy --package ${{ matrix.crate }} -- -D warnings
+          cargo build --package ${{ matrix.crate }}
+
+      - name: Build ${{ matrix.crate }} crate with no default features and all targets
        run: |
-          cargo build --package ${{ matrix.crate }} --no-default-features
+          cargo clippy --package ${{ matrix.crate }} --no-default-features --all-targets -- -D warnings
+          cargo build --package ${{ matrix.crate }} --no-default-features --all-targets
 
-      - name: Build ${{ matrix.crate }} crate normally
+      - name: Build ${{ matrix.crate }} crate with default features and all targets
        run: |
-          cargo build --package ${{ matrix.crate }}
+          cargo clippy --package ${{ matrix.crate }} --all-targets -- -D warnings
+          cargo build --package ${{ matrix.crate }} --all-targets
 
-      - name: Build ${{ matrix.crate }} crate with all features
+      - name: Build ${{ matrix.crate }} crate with all features and all targets
        run: |
-          cargo build --package ${{ matrix.crate }} --all-features
+          cargo clippy --package ${{ matrix.crate }} --all-features --all-targets -- -D warnings
+          cargo build --package ${{ matrix.crate }} --all-features --all-targets
 
   failure-issue:
     name: Open or update issues for building crates individually failures
diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml
index 5d6179fed92..b1bea8bfc69 100644
--- a/.github/workflows/ci-coverage.yml
+++ b/.github/workflows/ci-coverage.yml
@@ -1,3 +1,11 @@
+# This workflow calculates the test coverage for the Rust codebase.
+# 1. The code is checked out.
+# 2. Rust with the stable toolchain, minimal profile, and llvm-tools-preview component is set up.
+# 3. Necessary tools like 'cargo-llvm-cov' are installed.
+# 4. Proptest is minimized for efficient coverage test runs.
+# 5. Tests are run without producing a report to gather coverage information.
+# 6. A coverage report (lcov format) is generated based on the gathered information.
+# 7. Finally, this report is uploaded to Codecov for visualization and analysis.
 name: Coverage
 
 # Ensures that only one workflow task will run at a time. Previous builds, if
@@ -95,4 +103,4 @@ jobs:
        run: cargo llvm-cov --lcov --no-run --output-path lcov.info
 
      - name: Upload coverage report to Codecov
-        uses: codecov/codecov-action@v3.1.4
+        uses: codecov/codecov-action@v4.0.1
diff --git a/.github/workflows/ci-integration-tests-gcp.patch-external.yml b/.github/workflows/ci-integration-tests-gcp.patch-external.yml
new file mode 100644
index 00000000000..7590b3b4296
--- /dev/null
+++ b/.github/workflows/ci-integration-tests-gcp.patch-external.yml
@@ -0,0 +1,108 @@
+# Workflow patches for skipping Google Cloud integration test CI on PRs from external repositories.
+name: Integration Tests on GCP
+
+# Run on PRs from external repositories, let them pass, and then Mergify will check them.
+# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each
+# job.
+on:
+  pull_request:
+
+# IMPORTANT
+#
+# The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and
+# `ci-integration-tests-gcp.patch-external.yml` must be kept in sync.
+jobs:
+  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
+  get-available-disks:
+    name: Check if cached state disks exist for Mainnet / Check if cached state disks exist
+    # Only run on PRs from external repositories.
+    if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }}
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  build:
+    name: Build CI Docker / Build images
+    # This dependency allows all these jobs to depend on a single condition, making it easier to
+    # change.
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-stateful-sync:
+    name: Zebra checkpoint update / Run sync-past-checkpoint test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-update-sync:
+    name: Zebra tip update / Run update-to-tip test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  checkpoints-mainnet:
+    name: Generate checkpoints mainnet / Run checkpoints-mainnet test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  lightwalletd-rpc-test:
+    name: Zebra tip JSON-RPC / Run fully-synced-rpc test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  lightwalletd-transactions-test:
+    name: lightwalletd tip send / Run lwd-send-transactions test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  get-block-template-test:
+    name: get block template / Run get-block-template test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  submit-block-test:
+    name: submit block / Run submit-block test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  scan-start-where-left-test:
+    name: Scan starts where left / Run scan-start-where-left test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  lightwalletd-full-sync:
+    name: lightwalletd tip / Run lwd-full-sync test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  lightwalletd-update-sync:
+    name: lightwalletd tip update / Run lwd-update-sync test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  lightwalletd-grpc-test:
+    name: lightwalletd GRPC tests / Run lwd-grpc-wallet test
+    needs: get-available-disks
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
diff --git a/.github/workflows/ci-integration-tests-gcp.patch.yml b/.github/workflows/ci-integration-tests-gcp.patch.yml
index f671429b5bf..ac5d1dbf9cd 100644
--- a/.github/workflows/ci-integration-tests-gcp.patch.yml
+++ b/.github/workflows/ci-integration-tests-gcp.patch.yml
@@ -1,7 +1,8 @@
+# Workflow patches for skipping Google Cloud integration test CI when Rust code or dependencies
+# aren't modified in a PR.
 name: Integration Tests on GCP
 
-# These jobs *don't* depend on cached Google Cloud state disks,
-# so they can be skipped when the modified files make the actual workflow run.
+# Run on PRs with unmodified code and dependency files.
 on:
   pull_request:
     paths-ignore:
@@ -25,6 +26,10 @@ on:
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-build-docker-image.yml'
 
+# IMPORTANT
+#
+# The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and
+# `ci-integration-tests-gcp.patch-external.yml` must be kept in sync.
 jobs:
   # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
   get-available-disks:
diff --git a/.github/workflows/ci-integration-tests-gcp.yml b/.github/workflows/ci-integration-tests-gcp.yml
index 1132762c210..b411d0c7575 100644
--- a/.github/workflows/ci-integration-tests-gcp.yml
+++ b/.github/workflows/ci-integration-tests-gcp.yml
@@ -1,3 +1,8 @@
+# Google Cloud integration tests that run when Rust code or dependencies are modified,
+# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.)
+#
+# Specific conditions and dependencies are set for each job to ensure they are executed in the correct sequence and under the right circumstances.
+# Each test has a description of the conditions under which it runs.
 name: Integration Tests on GCP
 
 # Ensures that only one workflow task will run at a time. Previous builds, if
@@ -47,6 +52,7 @@ on:
        default: false
 
   pull_request:
+    # Skip PRs where Rust code and dependencies aren't modified.
     paths:
      # code and tests
      - '**/*.rs'
@@ -68,6 +74,7 @@ on:
      - '.github/workflows/sub-find-cached-disks.yml'
 
   push:
+    # Skip main branch updates where Rust code and dependencies aren't modified.
     branches:
      - main
     paths:
@@ -91,6 +98,10 @@ on:
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-build-docker-image.yml'
 
+# IMPORTANT
+#
+# The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and
+# `ci-integration-tests-gcp.patch-external.yml` must be kept in sync.
 jobs:
   # to also run a job on Mergify head branches,
   # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`:
@@ -104,6 +115,8 @@ jobs:
   # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml
   get-available-disks:
     name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }}
+    # Skip PRs from external repositories, let them pass, and then Mergify will check them
+    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
     uses: ./.github/workflows/sub-find-cached-disks.yml
     with:
      network: ${{ inputs.network || vars.ZCASH_NETWORK }}
@@ -114,6 +127,7 @@ jobs:
   # Some outputs are ignored, because we don't run those jobs on testnet.
   get-available-disks-testnet:
     name: Check if cached state disks exist for testnet
+    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
     uses: ./.github/workflows/sub-find-cached-disks.yml
     with:
      network: 'Testnet'
@@ -125,6 +139,7 @@ jobs:
   # testnet when running the image.
   build:
     name: Build CI Docker
+    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
     uses: ./.github/workflows/sub-build-docker-image.yml
     with:
      dockerfile_path: ./docker/Dockerfile
@@ -574,20 +589,46 @@ jobs:
   # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
   submit-block-test:
     name: submit block
-    needs: [ test-full-sync, get-available-disks ]
+    needs: [test-full-sync, get-available-disks]
     uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
     if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
     with:
      app_name: zebrad
      test_id: submit-block
      test_description: Test submitting blocks via Zebra's rpc server
-      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
+      test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
      needs_zebra_state: true
      needs_lwd_state: false
      saves_to_disk: false
      disk_suffix: tip
-      root_state_path: '/var/cache'
-      zebra_state_dir: 'zebrad-cache'
+      root_state_path: "/var/cache"
+      zebra_state_dir: "zebrad-cache"
+    secrets: inherit
+
+  # Test that the scanner can continue scanning where it was left when zebrad restarts.
+  #
+  # Runs:
+  # - after every PR is merged to `main`
+  # - on every PR update
+  #
+  # If the state version has changed, waits for the new cached states to be created.
+  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
+  scan-start-where-left-test:
+    name: Scan starts where left
+    needs: [test-full-sync, get-available-disks]
+    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
+    if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
+    with:
+      app_name: zebrad
+      test_id: scan-start-where-left
+      test_description: Test that the scanner can continue scanning where it was left when zebrad restarts.
+      test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
+      needs_zebra_state: true
+      needs_lwd_state: false
+      saves_to_disk: true
+      disk_suffix: tip
+      root_state_path: "/var/cache"
+      zebra_state_dir: "zebrad-cache"
     secrets: inherit
 
   failure-issue:
@@ -596,7 +637,22 @@ jobs:
     #
     # This list is for reliable tests that are run on the `main` branch.
     # Testnet jobs are not in this list, because we expect testnet to fail occasionally.
-    needs: [ regenerate-stateful-disks, test-full-sync, lightwalletd-full-sync, test-stateful-sync, test-update-sync, checkpoints-mainnet, lightwalletd-update-sync, lightwalletd-rpc-test, lightwalletd-transactions-test, lightwalletd-grpc-test, get-block-template-test, submit-block-test ]
+    needs:
+      [
+        regenerate-stateful-disks,
+        test-full-sync,
+        lightwalletd-full-sync,
+        test-stateful-sync,
+        test-update-sync,
+        checkpoints-mainnet,
+        lightwalletd-update-sync,
+        lightwalletd-rpc-test,
+        lightwalletd-transactions-test,
+        lightwalletd-grpc-test,
+        get-block-template-test,
+        submit-block-test,
+        scan-start-where-left-test,
+      ]
     # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges.
     # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
     if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null)
diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml
index 02881a0280e..c80c9f32850 100644
--- a/.github/workflows/ci-lint.yml
+++ b/.github/workflows/ci-lint.yml
@@ -1,3 +1,10 @@
+# This workflow conducts various linting checks for a Rust-based project.
+# 1. Determines if Rust or workflow files have been modified.
+# 2. Runs the Clippy linter on Rust files, producing annotations and failing on warnings.
+# 3. Ensures Rust code formatting complies with 'rustfmt' standards.
+# 4. Lints GitHub Actions workflow files for common issues.
+# 5. Checks for common spelling errors in the codebase.
+# The workflow is designed to maintain code quality and consistency, running checks conditionally based on the changed files.
 name: Lint
 
 # Ensures that only one workflow task will run at a time. Previous builds, if
@@ -37,7 +44,7 @@ jobs:
 
      - name: Rust files
        id: changed-files-rust
-        uses: tj-actions/changed-files@v39.2.3
+        uses: tj-actions/changed-files@v42.0.4
        with:
          files: |
            **/*.rs
@@ -49,7 +56,7 @@ jobs:
 
      - name: Workflow files
        id: changed-files-workflows
-        uses: tj-actions/changed-files@v39.2.3
+        uses: tj-actions/changed-files@v42.0.4
        with:
          files: |
            .github/workflows/*.yml
@@ -67,7 +74,7 @@ jobs:
          persist-credentials: false
 
      - name: Install last version of Protoc
-        uses: arduino/setup-protoc@v2.1.0
+        uses: arduino/setup-protoc@v3.0.0
        with:
          # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed
          version: '23.x'
@@ -86,7 +93,7 @@ jobs:
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default
 
-      - uses: Swatinem/rust-cache@v2.7.0
+      - uses: Swatinem/rust-cache@v2.7.3
        with:
          shared-key: "clippy-cargo-lock"
 
@@ -98,11 +105,11 @@ jobs:
          # GitHub displays the clippy job and its results as separate entries
          name: Clippy (stable) Results
          token: ${{ secrets.GITHUB_TOKEN }}
-          args: --all-features --all-targets -- -D warnings
+          args: --workspace --all-features --all-targets -- -D warnings
 
      - name: Run clippy manually without annotations
        if: ${{ !steps.check_permissions.outputs.has-permission }}
-        run: cargo clippy --all-features --all-targets -- -D warnings
+        run: cargo clippy --workspace --all-features --all-targets -- -D warnings
 
   fmt:
     name: Rustfmt
@@ -118,7 +125,7 @@ jobs:
      - uses: r7kamura/rust-problem-matchers@v1.4.0
 
      - name: Install last version of Protoc
-        uses: arduino/setup-protoc@v2.1.0
+        uses: arduino/setup-protoc@v3.0.0
        with:
          # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed
          version: '23.x'
@@ -131,7 +138,7 @@ jobs:
 
      # We don't cache `fmt` outputs because the job is quick,
      # and we want to use the limited GitHub actions cache space for slower jobs.
-      #- uses: Swatinem/rust-cache@v2.7.0
+      #- uses: Swatinem/rust-cache@v2.7.3
 
      - run: |
          cargo fmt --all -- --check
@@ -144,19 +151,23 @@ jobs:
     steps:
      - uses: actions/checkout@v4.1.1
      - name: actionlint
-        uses: reviewdog/action-actionlint@v1.39.1
+        uses: reviewdog/action-actionlint@v1.41.0
        with:
          level: warning
          fail_on_error: false
-      - name: validate-dependabot
-        uses: marocchino/validate-dependabot@v2.1.0
+      # This is failing with a JSON schema error, see #8028 for details.
+      #- name: validate-dependabot
+      #  # This gives an error when run on PRs from external repositories, so we skip it.
+      #  # If this is a PR, check that the PR source is a local branch. Always runs on non-PRs.
+      #  if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
+      #  uses: marocchino/validate-dependabot@v2.1.0
 
   codespell:
     runs-on: ubuntu-latest
     needs: changed-files
     steps:
      - uses: actions/checkout@v4.1.1
-      - uses: plettich/action-codespell@master
+      - uses: codespell-project/actions-codespell@v2.0
        with:
-          github_token: ${{ secrets.github_token }}
-          level: warning
+          only_warn: 1
+
diff --git a/.github/workflows/ci-unit-tests-docker.patch-external.yml b/.github/workflows/ci-unit-tests-docker.patch-external.yml
new file mode 100644
index 00000000000..2db50e52f7a
--- /dev/null
+++ b/.github/workflows/ci-unit-tests-docker.patch-external.yml
@@ -0,0 +1,65 @@
+# Workflow patches for skipping Google Cloud unit test CI on PRs from external repositories.
+name: Docker Unit Tests
+
+# Run on PRs from external repositories, let them pass, and then Mergify will check them.
+# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each
+# job.
+on:
+  pull_request:
+
+# IMPORTANT
+#
+# The job names in `ci-unit-tests-docker.yml`, `ci-unit-tests-docker.patch.yml` and
+# `ci-unit-tests-docker.patch-external.yml` must be kept in sync.
+jobs:
+  build:
+    name: Build CI Docker / Build images
+    # Only run on PRs from external repositories.
+    if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }}
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-all:
+    name: Test all
+    # This dependency allows all these jobs to depend on a single condition, making it easier to
+    # change.
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-fake-activation-heights:
+    name: Test with fake activation heights
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-empty-sync:
+    name: Test checkpoint sync from empty state
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-lightwalletd-integration:
+    name: Test integration with lightwalletd
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-configuration-file:
+    name: Test CI default Docker config file / Test default-conf in Docker
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
+
+  test-zebra-conf-path:
+    name: Test CI custom Docker config file / Test custom-conf in Docker
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - run: 'echo "Skipping job on fork"'
diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml
index 3ddb5f51694..0aeef22bafd 100644
--- a/.github/workflows/ci-unit-tests-docker.patch.yml
+++ b/.github/workflows/ci-unit-tests-docker.patch.yml
@@ -1,7 +1,7 @@
+# Workflow patches for skipping unit test CI when Rust code or dependencies aren't modified in a PR.
 name: Docker Unit Tests
 
-# These jobs *don't* depend on cached Google Cloud state disks,
-# so they can be skipped when the modified files make the actual workflow run.
+# Run on PRs with unmodified code and dependency files.
 on:
   pull_request:
     paths-ignore:
@@ -25,14 +25,11 @@ on:
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-build-docker-image.yml'
 
+# IMPORTANT
+#
+# The job names in `ci-unit-tests-docker.yml`, `ci-unit-tests-docker.patch.yml` and
+# `ci-unit-tests-docker.patch-external.yml` must be kept in sync.
 jobs:
-  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
-  get-available-disks:
-    name: Check if cached state disks exist for Mainnet / Check if cached state disks exist
-    runs-on: ubuntu-latest
-    steps:
-      - run: 'echo "No build required"'
-
   build:
     name: Build CI Docker / Build images
     runs-on: ubuntu-latest
diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml
index 5f90fbf2184..9bed872f1ac 100644
--- a/.github/workflows/ci-unit-tests-docker.yml
+++ b/.github/workflows/ci-unit-tests-docker.yml
@@ -1,3 +1,16 @@
+# Google Cloud unit tests that run when Rust code or dependencies are modified,
+# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.)
+#
+# This workflow is designed for running various unit tests within Docker containers.
+# Jobs:
+# 1. Builds a Docker image for tests, adaptable to the specified network (Mainnet or Testnet).
+# 2. 'test-all': Executes all Zebra tests, including normally ignored ones, in a Docker environment.
+# 3. 'test-fake-activation-heights': Runs state tests with fake activation heights, isolating its build products.
+# 4. 'test-empty-sync': Tests Zebra's ability to sync and checkpoint from an empty state.
+# 5. 'test-lightwalletd-integration': Validates integration with 'lightwalletd' starting from an empty state.
+# 6. 'test-configuration-file': Assesses the default Docker configuration for Zebra.
+# 7. 'test-configuration-file-testnet': Checks the Docker image reconfiguration for the Testnet.
+# 8. 'test-zebra-conf-path': Tests Zebra using a custom Docker configuration.
 name: Docker Unit Tests
 
 # Ensures that only one workflow task will run at a time. Previous builds, if
@@ -21,6 +34,7 @@ on:
        default: false
 
   pull_request:
+    # Skip PRs where Rust code and dependencies aren't modified.
     paths:
      # code and tests
      - '**/*.rs'
@@ -40,10 +54,12 @@ on:
      - '.github/workflows/sub-deploy-integration-tests-gcp.yml'
      - '.github/workflows/sub-build-docker-image.yml'
      - '.github/workflows/sub-find-cached-disks.yml'
+      - '.github/workflows/sub-test-zebra-config.yml'
 
   push:
     branches:
      - main
+    # Skip main branch updates where Rust code and dependencies aren't modified.
     paths:
      # code and tests
      - '**/*.rs'
@@ -66,15 +82,16 @@ on:
      - '.github/workflows/sub-build-docker-image.yml'
 
 env:
-  # We need to combine the features manually because some tests don't use the Docker entrypoint
-  TEST_FEATURES: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES) }}
-  EXPERIMENTAL_FEATURES: ${{ format('{0} {1} {2}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }}
   RUST_LOG: ${{ vars.RUST_LOG }}
   RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }}
   RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }}
   COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }}
   CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }}
 
+# IMPORTANT
+#
+# The job names in `ci-unit-tests-docker.yml`, `ci-unit-tests-docker.patch.yml` and
+# `ci-unit-tests-docker.patch-external.yml` must be kept in sync.
 jobs:
   # Build the docker image used by the tests.
   #
@@ -83,6 +100,8 @@ jobs:
   # testnet when running the image.
build: name: Build CI Docker + # Skip PRs from external repositories, let them pass, and then Mergify will check them + if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile @@ -113,18 +132,21 @@ jobs: # # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. # - # TODO: move this test command into entrypoint.sh - # add a separate experimental workflow job if this job is slow - name: Run zebrad tests + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.TEST_FEATURES }}" --workspace -- --include-ignored - # Currently GitHub doesn't allow empty variables - if [[ -n "${{ vars.RUST_EXPERIMENTAL_FEATURES }}" && "${{ vars.RUST_EXPERIMENTAL_FEATURES }}" != " " ]]; then - docker run -e NETWORK --name zebrad-tests-experimental --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.EXPERIMENTAL_FEATURES }} " --workspace -- --include-ignored - fi + docker run --tty -e NETWORK -e RUN_ALL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + + # Run unit, basic acceptance tests, and ignored tests with experimental features. + # + - name: Run zebrad tests with experimental features env: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e RUN_ALL_EXPERIMENTAL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Run state tests with fake activation heights. # @@ -148,15 +170,12 @@ jobs: with: short-length: 7 - # TODO: move this test command into entrypoint.sh - # make sure that at least one test runs, and that it doesn't skip itself due to the environmental variable - name: Run tests with fake activation heights - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "zebra-test" --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights env: - TEST_FAKE_ACTIVATION_HEIGHTS: '1' NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. 
test-empty-sync: @@ -172,13 +191,12 @@ jobs: with: short-length: 7 - # TODO: move this test command into entrypoint.sh - name: Run zebrad large sync tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.TEST_FEATURES }}" --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ env: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e TEST_ZEBRA_EMPTY_SYNC=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Test launching lightwalletd with an empty lightwalletd and Zebra state. test-lightwalletd-integration: @@ -194,14 +212,12 @@ jobs: with: short-length: 7 - # TODO: move this test command into entrypoint.sh - name: Run tests with empty lightwalletd launch - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.TEST_FEATURES }}" --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration env: - ZEBRA_TEST_LIGHTWALLETD: '1' NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Test that Zebra works using the default config with the latest Zebra version. test-configuration-file: @@ -237,7 +253,7 @@ jobs: with: test_id: 'custom-conf' docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - grep_patterns: '-e "v1.0.0-rc.2.toml"' + grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"' test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' network: ${{ inputs.network || vars.ZCASH_NETWORK }} diff --git a/.github/workflows/ci-unit-tests-os.patch.yml b/.github/workflows/ci-unit-tests-os.patch.yml index 549f1809a51..4a97eb719f5 100644 --- a/.github/workflows/ci-unit-tests-os.patch.yml +++ b/.github/workflows/ci-unit-tests-os.patch.yml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - os: [ubuntu-latest] + os: [ubuntu-latest, macos-latest] rust: [stable, beta] features: [""] exclude: diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index 69ed6ad40bd..1862e4b041a 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -1,3 +1,9 @@ +# This workflow performs unit tests across different operating systems and Rust versions. It includes steps for: +# - Testing on Ubuntu and macOS with stable and beta Rust toolchains. +# - Installing Zebra from the lockfile without cache on Ubuntu. +# - Verifying that Cargo.lock is up-to-date with Cargo.toml changes. +# - Running cargo-deny checks for dependencies. +# - Checking for unused dependencies in the code. 
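+#
+# A sketch of reproducing one matrix cell locally (beta Rust on your own OS;
+# assumes rustup and protoc 23.x are installed, mirroring the steps below):
+#
+#   rustup toolchain install beta --profile minimal
+#   cargo +beta test --workspace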
name: Multi-OS Unit Tests # Ensures that only one workflow task will run at a time. Previous builds, if @@ -51,7 +57,6 @@ on: - '**/clippy.toml' # workflow definitions - '.github/workflows/ci-unit-tests-os.yml' - - '.github/workflows/sub-build-docker-image.yml' env: CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} @@ -74,8 +79,7 @@ jobs: fail-fast: false matrix: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 - # TODO: macOS tests were removed for now, see https://github.com/ZcashFoundation/zebra/issues/6824 - os: [ubuntu-latest] + os: [ubuntu-latest, macos-latest] rust: [stable, beta] # TODO: When vars.EXPERIMENTAL_FEATURES has features in it, add it here. # Or work out a way to trim the space from the variable: GitHub doesn't allow empty variables. @@ -97,7 +101,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v2.1.0 + uses: arduino/setup-protoc@v3.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '23.x' @@ -109,7 +113,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.7.0 + - uses: Swatinem/rust-cache@v2.7.3 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). #with: @@ -208,7 +212,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v2.1.0 + uses: arduino/setup-protoc@v3.0.0 with: version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -218,7 +222,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.7.0 + - uses: Swatinem/rust-cache@v2.7.3 with: shared-key: "clippy-cargo-lock" @@ -277,7 +281,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - name: Install cargo-machete - uses: baptiste0928/cargo-install@v2.2.0 + uses: baptiste0928/cargo-install@v3.0.0 with: crate: cargo-machete diff --git a/.github/workflows/docs-deploy-firebase.patch-external.yml b/.github/workflows/docs-deploy-firebase.patch-external.yml new file mode 100644 index 00000000000..8478e4c2ded --- /dev/null +++ b/.github/workflows/docs-deploy-firebase.patch-external.yml @@ -0,0 +1,30 @@ +# Workflow patches for skipping Google Cloud docs updates on PRs from external repositories. +name: Docs + +# Run on PRs from external repositories, let them pass, and then Mergify will check them. +# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each +# job. +on: + pull_request: + +# IMPORTANT +# +# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and +# `docs-deploy-firebase.patch-external.yml` must be kept in sync. +jobs: + build-docs-book: + name: Build and Deploy Zebra Book Docs + # Only run on PRs from external repositories. + if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }} + runs-on: ubuntu-latest + steps: + - run: 'echo "Skipping job on fork"' + + build-docs-internal: + name: Build and Deploy Zebra Internal Docs + # This dependency allows all these jobs to depend on a single condition, making it easier to + # change. 
+ needs: build-docs-book + runs-on: ubuntu-latest + steps: + - run: 'echo "Skipping job on fork"' diff --git a/.github/workflows/docs-deploy-firebase.patch.yml b/.github/workflows/docs-deploy-firebase.patch.yml index 04b4bdc46d4..7d84ff75961 100644 --- a/.github/workflows/docs-deploy-firebase.patch.yml +++ b/.github/workflows/docs-deploy-firebase.patch.yml @@ -1,9 +1,10 @@ +# Workflow patches for skipping Google Cloud docs updates when docs, Rust code, or dependencies +# aren't modified in a PR. name: Docs +# Run on PRs with unmodified docs, code, and dependency files. on: pull_request: - branches: - - main paths-ignore: # doc source files - 'book/**' @@ -20,6 +21,10 @@ on: # workflow definitions - '.github/workflows/docs-deploy-firebase.yml' +# IMPORTANT +# +# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and +# `docs-deploy-firebase.patch-external.yml` must be kept in sync. jobs: build-docs-book: name: Build and Deploy Zebra Book Docs @@ -27,16 +32,8 @@ jobs: steps: - run: 'echo "No build required"' - build-docs-external: - name: Build and Deploy Zebra External Docs - timeout-minutes: 45 - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - build-docs-internal: name: Build and Deploy Zebra Internal Docs - timeout-minutes: 45 runs-on: ubuntu-latest steps: - run: 'echo "No build required"' diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 806211131c1..71cdd2927e2 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -1,3 +1,9 @@ +# Google Cloud docs updates that run when docs, Rust code, or dependencies are modified, +# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are deployed by mergify.) + +# - Builds and deploys Zebra Book Docs using mdBook, setting up necessary tools and deploying to Firebase. +# - Compiles and deploys external documentation, setting up Rust with the beta toolchain and default profile, building the docs, and deploying them to Firebase. +# - Assembles and deploys internal documentation with similar steps, including private items in the documentation, and deploys to Firebase. name: Docs # Ensures that only one workflow task will run at a time. Previous deployments, if @@ -9,7 +15,9 @@ concurrency: on: workflow_dispatch: + push: + # Skip main branch updates where docs, Rust code, and dependencies aren't modified. branches: - main paths: @@ -29,8 +37,7 @@ on: - '.github/workflows/docs-deploy-firebase.yml' pull_request: - branches: - - main + # Skip PRs where docs, Rust code, and dependencies aren't modified. paths: # doc source files - 'book/**' @@ -45,7 +52,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/docs.yml' + - '.github/workflows/docs-deploy-firebase.yml' env: RUST_LOG: ${{ vars.RUST_LOG }} @@ -60,9 +67,15 @@ env: # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L87 RUSTDOCFLAGS: --html-in-header katex-header.html -D warnings -A rustdoc::private_intra_doc_links +# IMPORTANT +# +# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and +# `docs-deploy-firebase.patch-external.yml` must be kept in sync. 
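+#
+# To approximate the internal docs build locally under the same RUSTDOCFLAGS
+# (a sketch; the --document-private-items flag is an assumption about the
+# "Build internal docs" step, which is not shown in full here):
+#
+#   export RUSTDOCFLAGS="--html-in-header katex-header.html -D warnings -A rustdoc::private_intra_doc_links"
+#   cargo +beta doc --no-deps --workspace --all-features --document-private-items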
jobs: build-docs-book: name: Build and Deploy Zebra Book Docs + # Skip PRs from external repositories, let them pass, and then Mergify will check them + if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} timeout-minutes: 5 runs-on: ubuntu-latest permissions: @@ -79,7 +92,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Setup mdBook - uses: jontze/action-mdbook@v2.2.2 + uses: jontze/action-mdbook@v3.0.0 with: token: ${{ secrets.GITHUB_TOKEN }} mdbook-version: '~0.4' @@ -93,7 +106,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: retries: '3' workload_identity_provider: '${{ vars.GCP_WIF }}' @@ -114,64 +127,9 @@ jobs: projectId: ${{ vars.GCP_FIREBASE_PROJECT }} target: docs-book - build-docs-external: - name: Build and Deploy Zebra External Docs - timeout-minutes: 45 - runs-on: ubuntu-latest - permissions: - checks: write - contents: 'read' - id-token: 'write' - pull-requests: write - steps: - - name: Checkout the source code - uses: actions/checkout@v4.1.1 - with: - persist-credentials: false - - - name: Install last version of Protoc - uses: arduino/setup-protoc@v2.1.0 - with: - version: '23.x' - repo-token: ${{ secrets.GITHUB_TOKEN }} - - # Setup Rust with beta toolchain and default profile (to include rust-docs) - - name: Setup Rust - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - - uses: Swatinem/rust-cache@v2.7.0 - - - name: Build external docs - run: | - # Exclude zebra-utils, it is not for library or app users - cargo doc --no-deps --workspace --all-features --exclude zebra-utils --target-dir "$(pwd)"/target/external - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_FIREBASE_SA }}' - - # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed - - name: Add $GCP_FIREBASE_SA_PATH to env - run: | - # shellcheck disable=SC2002 - echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" - - - name: Deploy external docs to firebase - uses: FirebaseExtended/action-hosting-deploy@v0.7.1 - with: - firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }} - channelId: ${{ env.FIREBASE_CHANNEL }} - target: docs-external - projectId: ${{ vars.GCP_FIREBASE_PROJECT }} - build-docs-internal: name: Build and Deploy Zebra Internal Docs + if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} timeout-minutes: 45 runs-on: ubuntu-latest permissions: @@ -188,7 +146,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v2.1.0 + uses: arduino/setup-protoc@v3.0.0 with: version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -198,7 +156,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - uses: Swatinem/rust-cache@v2.7.0 + - uses: Swatinem/rust-cache@v2.7.3 - name: Build internal docs run: | @@ -207,7 +165,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: retries: 
'3' workload_identity_provider: '${{ vars.GCP_WIF }}' diff --git a/.github/workflows/docs-dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml index e20b126646f..010e3ef9f7c 100644 --- a/.github/workflows/docs-dockerhub-description.yml +++ b/.github/workflows/docs-dockerhub-description.yml @@ -22,7 +22,7 @@ jobs: persist-credentials: false - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v3.4.2 + uses: peter-evans/dockerhub-description@v4.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index 11ace8f39e1..9b84c5f5360 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -1,3 +1,10 @@ +# This workflow is designed for manually deploying zcashd nodes to Google Cloud Platform (GCP) based on user inputs. +# - Allows selection of network type (Mainnet or Testnet) and instance group size. +# - Converts network name to lowercase to comply with GCP labeling requirements. +# - Authenticates with Google Cloud using provided credentials. +# - Creates a GCP instance template from a container image of zcashd. +# - Checks if the specified instance group already exists. +# - Depending on the existence check, either creates a new managed instance group or updates the existing one with the new template. name: Zcashd Manual Deploy on: @@ -45,14 +52,14 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: retries: '3' workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 + uses: google-github-actions/setup-gcloud@v2.1.0 # Create instance template from container image - name: Create instance template diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index f0b60acbad5..1cf4e6b5da7 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -18,29 +18,34 @@ jobs: # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from git - # The image will be named `zebra:` - build: - name: Build Release Docker + # The image will be named `zebra:-experimental` + build-experimental: + name: Build Experimental Features Release Docker uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebra - features: ${{ vars.RUST_PROD_FEATURES }} + tag_suffix: -experimental + features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }} rust_log: ${{ vars.RUST_LOG }} # This step needs access to Docker Hub secrets to run successfully secrets: inherit - # The image will be named `zebra:.experimental` - build-experimental: - name: Build Experimental Features Release Docker + # The image will be named `zebra:` + # It should be built last, so tags with the same name point to the production build, not the experimental build. 
+ build: + name: Build Release Docker + # Run this build last, regardless of whether experimental worked + needs: build-experimental + if: always() uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebra - tag_suffix: .experimental - features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }} + latest_tag: true + features: ${{ vars.RUST_PROD_FEATURES }} rust_log: ${{ vars.RUST_LOG }} # This step needs access to Docker Hub secrets to run successfully secrets: inherit diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index 8546ee774db..471ed9e3fc3 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -85,7 +85,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - name: Install cargo-release - uses: baptiste0928/cargo-install@v2.2.0 + uses: baptiste0928/cargo-install@v3.0.0 with: crate: cargo-release @@ -93,26 +93,9 @@ jobs: # # These steps should be kept up to date with the release checklist. # - # TODO: move these steps into a script which is run in the release checklist and CI - name: Crate release dry run run: | - set -ex - git config --global user.email "release-tests-no-reply@zfnd.org" - git config --global user.name "Automated Release Test" - # This script must be the same as: - # https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions - # with an extra `--no-confirm` argument for non-interactive testing. - cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad beta - cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch - cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad - cargo release commit --verbose --execute --no-confirm --allow-branch '*' - # Check the release will work using a dry run - # - # Workaround unpublished dependency version errors by skipping those crates: - # https://github.com/crate-ci/cargo-release/issues/691 - # - # TODO: check all crates after fixing these errors - cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad + ./.github/workflows/scripts/release-crates-dry-run.sh # TODO: actually do the release here #release-crates: diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 39e1ba7bde2..fd4c39a728c 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -1,6 +1,10 @@ -# Creates a draft release with all the PR names since the last release. +# This workflow automates the creation and updating of draft releases. It compiles PR titles into the draft release notes. # https://github.com/ZcashFoundation/zebra/releases # +# - Updates the draft release upon each merge into 'main'. +# - Utilizes the release-drafter GitHub Action to accumulate PR titles since the last release into a draft release note. +# - Suitable permissions are set for creating releases and handling pull requests. 
+# # Workflow is based on: # https://github.com/marketplace/actions/release-drafter#usage name: Release Drafter @@ -34,7 +38,7 @@ jobs: runs-on: ubuntu-latest steps: # Drafts your next Release notes - - uses: release-drafter/release-drafter@v5 + - uses: release-drafter/release-drafter@v6 with: config-name: release-drafter.yml commitish: main diff --git a/.github/workflows/scripts/gcp-delete-old-cache-images.sh b/.github/workflows/scripts/gcp-delete-old-cache-images.sh new file mode 100755 index 00000000000..a614e5fa576 --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-cache-images.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# Function to handle image deletion logic +delete_images() { + local image_type="$1" + local filter="$2" + local kept_images=0 + + echo "Processing ${image_type} images" + images=$(gcloud compute images list --sort-by=~creationTimestamp --filter="${filter} AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)') + + for image in ${images}; do + if [[ "${kept_images}" -lt "${KEEP_LATEST_IMAGE_COUNT}" ]]; then + ((kept_images++)) + echo "Keeping image ${kept_images} named ${image}" + else + echo "Deleting image: ${image}" + gcloud compute images delete "${image}" || echo "Failed to delete image: ${image}" + fi + done +} + +# Check if necessary variables are set +if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ && "${KEEP_LATEST_IMAGE_COUNT}" =~ ^[0-9]+$ ]]; then + echo "ERROR: One or more required variables are not set or not numeric" + exit 1 +fi + +# Set pipefail +set -o pipefail + +# Calculate the date before which images should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d') + +# Mainnet and Testnet zebrad checkpoint +delete_images "Mainnet zebrad checkpoint" "name~^zebrad-cache-.*-mainnet-checkpoint" # As of April 2023, these disk names look like: zebrad-cache-6556-merge-a2ca4de-v25-mainnet-tip(-u)?-140654 +delete_images "Testnet zebrad checkpoint" "name~^zebrad-cache-.*-testnet-checkpoint" + +# Mainnet and Testnet zebrad tip +delete_images "Mainnet zebrad tip" "name~^zebrad-cache-.*-mainnet-tip" +delete_images "Testnet zebrad tip" "name~^zebrad-cache-.*-testnet-tip" + +# Mainnet and Testnet lightwalletd tip +delete_images "Mainnet lightwalletd tip" "name~^lwd-cache-.*-mainnet-tip" +delete_images "Testnet lightwalletd tip" "name~^lwd-cache-.*-testnet-tip" diff --git a/.github/workflows/scripts/gcp-delete-old-disks.sh b/.github/workflows/scripts/gcp-delete-old-disks.sh new file mode 100755 index 00000000000..1a6dd83051a --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-disks.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Check if DELETE_AGE_DAYS is set and is a number +if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then + echo "ERROR: DELETE_AGE_DAYS is not set or not a number" + exit 1 +fi + +# Set pipefail to catch errors in pipelines +set -o pipefail + +# Calculate the date before which disks should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d') + +# Fetch disks created by PR jobs, and other jobs that use a commit hash +if ! COMMIT_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then + echo "Error fetching COMMIT_DISKS." 
+ exit 1 +fi + +# Delete commit disks if any are found +IFS=$'\n' +for DISK_AND_LOCATION in ${COMMIT_DISKS}; do + IFS=$' ' + echo "Deleting disk: ${DISK_AND_LOCATION}" + if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then + echo "Failed to delete disk: ${DISK_AND_LOCATION}" + fi + IFS=$'\n' +done +IFS=$' \t\n' # Reset IFS to its default value + +# Fetch disks created by managed instance groups, and other jobs that start with "zebrad-" +if ! ZEBRAD_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~^zebrad- AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then + echo "Error fetching ZEBRAD_DISKS." + exit 1 +fi + +# Delete zebrad disks if any are found +IFS=$'\n' +for DISK_AND_LOCATION in ${ZEBRAD_DISKS}; do + IFS=$' ' + echo "Deleting disk: ${DISK_AND_LOCATION}" + if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then + echo "Failed to delete disk: ${DISK_AND_LOCATION}" + fi + IFS=$'\n' +done +IFS=$' \t\n' # Reset IFS to its default value diff --git a/.github/workflows/scripts/gcp-delete-old-instances.sh b/.github/workflows/scripts/gcp-delete-old-instances.sh new file mode 100755 index 00000000000..12ea0d1c35e --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-instances.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Check if DELETE_INSTANCE_DAYS is set and is a number +if ! [[ "${DELETE_INSTANCE_DAYS}" =~ ^[0-9]+$ ]]; then + echo "ERROR: DELETE_INSTANCE_DAYS is not set or not a number" + exit 1 +fi + +# Set pipefail to catch errors in pipelines +set -o pipefail + +# Calculate the date before which instances should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_INSTANCE_DAYS} days ago" '+%Y%m%d') + +# Check if gcloud command is available +if ! command -v gcloud &> /dev/null; then + echo "ERROR: gcloud command not found" + exit 1 +fi + +# Fetch the list of instances to delete +if ! INSTANCES=$(gcloud compute instances list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,ZONE)' | sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/'); then + echo "Error fetching instances." + exit 1 +fi + +# Delete instances if any are found +if [[ -n "${INSTANCES}" ]]; then + IFS=$'\n' + for INSTANCE_AND_ZONE in ${INSTANCES}; do + IFS=$' ' + echo "Deleting instance: ${INSTANCE_AND_ZONE}" + gcloud compute instances delete --verbosity=info "${INSTANCE_AND_ZONE}" --delete-disks=all || { + echo "Failed to delete instance: ${INSTANCE_AND_ZONE}" + continue + } + IFS=$'\n' + done + IFS=$' \t\n' # Reset IFS to its default value +else + echo "No instances to delete." +fi diff --git a/.github/workflows/scripts/gcp-delete-old-templates.sh b/.github/workflows/scripts/gcp-delete-old-templates.sh new file mode 100755 index 00000000000..29898dc2142 --- /dev/null +++ b/.github/workflows/scripts/gcp-delete-old-templates.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +# Check if DELETE_AGE_DAYS is set and is a number +if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then + echo "ERROR: DELETE_AGE_DAYS is not set or not a number" + exit 1 +fi + +# Set pipefail to catch errors in pipelines +set -o pipefail + +# Calculate the date before which templates should be deleted +DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d') + +# Check if gcloud command is available +if ! 
command -v gcloud &> /dev/null; then + echo "ERROR: gcloud command not found" + exit 1 +fi + +# Fetch the list of instance templates to delete +if ! TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)'); then + echo "Error fetching instance templates." + exit 1 +fi + +# Delete templates if any are found +for TEMPLATE in ${TEMPLATES}; do + echo "Deleting template: ${TEMPLATE}" + if ! gcloud compute instance-templates delete "${TEMPLATE}"; then + echo "Failed to delete template: ${TEMPLATE}" + fi +done diff --git a/.github/workflows/scripts/gcp-get-available-disks.sh b/.github/workflows/scripts/gcp-get-available-disks.sh new file mode 100755 index 00000000000..667c6f36c4b --- /dev/null +++ b/.github/workflows/scripts/gcp-get-available-disks.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Description: +# Check if there are cached state disks available for subsequent jobs to use. +# +# This lookup uses the state version from constants.rs. +# It accepts disks generated by any branch, including draft and unmerged PRs. +# +# If the disk exists, sets the corresponding output to "true": +# - lwd_tip_disk +# - zebra_tip_disk +# - zebra_checkpoint_disk + +set -euxo pipefail + + +LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) +echo "STATE_VERSION: ${LOCAL_STATE_VERSION}" + +# Function to find a disk image and output its name +find_disk_image() { +local base_name="${1}" +local disk_type="${2}" +local disk_pattern="${base_name}-cache" +local output_var="${base_name}_${disk_type}_disk" +local disk_image + +disk_image=$(gcloud compute images list --filter="status=READY AND name~${disk_pattern}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${disk_type}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + +if [[ -z "${disk_image}" ]]; then + echo "No ${disk_type^^} disk found for ${base_name^^} on network: ${NETWORK}" + echo "${output_var}=false" >> "${GITHUB_OUTPUT}" +else + echo "Disk: ${disk_image}" + echo "${output_var}=true" >> "${GITHUB_OUTPUT}" +fi +} + +# Find and output LWD and Zebra disks +find_disk_image "lwd" "tip" +find_disk_image "zebrad" "tip" +find_disk_image "zebrad" "checkpoint" diff --git a/.github/workflows/scripts/gcp-get-cached-disks.sh b/.github/workflows/scripts/gcp-get-cached-disks.sh new file mode 100755 index 00000000000..9b05c257096 --- /dev/null +++ b/.github/workflows/scripts/gcp-get-cached-disks.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Description: +# This script finds a cached Google Cloud Compute image based on specific criteria. +# It prioritizes images from the current commit, falls back to the main branch, +# and finally checks other branches if needed. The selected image is used for +# setting up the environment in a CI/CD pipeline. 
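+#
+# Illustrative image names the search below might match, in priority order
+# (hypothetical names; v25 is an example state version, not a fixed value):
+#
+#   zebrad-cache-my-branch-a1b2c3d-v25-mainnet-tip   <- current commit
+#   zebrad-cache-main-9f8e7d6-v25-mainnet-tip        <- main branch fallback
+#   zebrad-cache-other-pr-0c1d2e3-v25-mainnet-tip    <- any branch fallback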
+ +set -eo pipefail + +# Function to find and report a cached disk image +find_cached_disk_image() { + local search_pattern="${1}" + local git_source="${2}" + local disk_name + + disk_name=$(gcloud compute images list --filter="status=READY AND name~${search_pattern}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + + # Use >&2 to redirect to stderr and avoid sending wrong assignments to stdout + if [[ -n "${disk_name}" ]]; then + echo "Found ${git_source} Disk: ${disk_name}" >&2 + disk_description=$(gcloud compute images describe "${disk_name}" --format="value(DESCRIPTION)") + echo "Description: ${disk_description}" >&2 + echo "${disk_name}" # This is the actual return value when a disk is found + else + echo "No ${git_source} disk found." >&2 + fi +} + +# Extract local state version +echo "Extracting local state version..." +LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) +echo "STATE_VERSION: ${LOCAL_STATE_VERSION}" + +# Define DISK_PREFIX based on the requiring state directory +if [[ "${NEEDS_LWD_STATE}" == "true" ]]; then + DISK_PREFIX="${LWD_STATE_DIR}" +else + DISK_PREFIX="${ZEBRA_STATE_DIR:-${DISK_PREFIX}}" +fi + +# Find the most suitable cached disk image +echo "Finding the most suitable cached disk image..." +if [[ -z "${CACHED_DISK_NAME}" ]]; then + # Try to find a cached disk image from the current commit + COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" + CACHED_DISK_NAME=$(find_cached_disk_image "${COMMIT_DISK_PREFIX}" "commit") + # If no cached disk image is found, try to find one from the main branch + if [[ "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then + MAIN_DISK_PREFIX="${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" + CACHED_DISK_NAME=$(find_cached_disk_image "${MAIN_DISK_PREFIX}" "main branch") + # Else, try to find one from any branch + else + ANY_DISK_PREFIX="${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" + CACHED_DISK_NAME=$(find_cached_disk_image "${ANY_DISK_PREFIX}" "any branch") + fi +fi + +# Handle case where no suitable disk image is found +if [[ -z "${CACHED_DISK_NAME}" ]]; then + echo "No suitable cached state disk available." + echo "Expected pattern: ${COMMIT_DISK_PREFIX}" + echo "Cached state test jobs must depend on the cached state rebuild job." + exit 1 +fi + +echo "Selected Disk: ${CACHED_DISK_NAME}" + +# Exporting variables for subsequent steps +echo "Exporting variables for subsequent steps..." +export CACHED_DISK_NAME="${CACHED_DISK_NAME}" +export LOCAL_STATE_VERSION="${LOCAL_STATE_VERSION}" diff --git a/.github/workflows/scripts/gcp-vm-startup-script.sh b/.github/workflows/scripts/gcp-vm-startup-script.sh index da65ff26770..7098ed8981f 100755 --- a/.github/workflows/scripts/gcp-vm-startup-script.sh +++ b/.github/workflows/scripts/gcp-vm-startup-script.sh @@ -1,4 +1,4 @@ -#! /bin/bash +#!/usr/bin/env bash # Increase the Google Cloud instance sshd connection limit # # This script appends 'MaxStartups 500' to /etc/ssh/sshd_config allowing up to 500 diff --git a/.github/workflows/scripts/release-crates-dry-run.sh b/.github/workflows/scripts/release-crates-dry-run.sh new file mode 100755 index 00000000000..cbae0ca35c3 --- /dev/null +++ b/.github/workflows/scripts/release-crates-dry-run.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -ex + +# Check if necessary tools are installed +if ! 
command -v git &> /dev/null || ! command -v cargo &> /dev/null; then + echo "ERROR: Required tools (git, cargo) are not installed." + exit 1 +fi + +git config --global user.email "release-tests-no-reply@zfnd.org" +git config --global user.name "Automated Release Test" + +# Ensure cargo-release is installed +if ! cargo release --version &> /dev/null; then + echo "ERROR: cargo release must be installed." + exit 1 +fi + +# Release process +# We use the same commands as the [release drafter](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions) +# with an extra `--no-confirm` argument for non-interactive testing. +# Update everything except for alpha crates and zebrad: +cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc beta + +# Due to a bug in cargo-release, we need to pass exact versions for alpha crates: +cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-scan 0.1.0-alpha.5 +cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-grpc 0.1.0-alpha.3 + +# Update zebrad: +cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch +# Continue with the release process: +cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad +cargo release commit --verbose --execute --no-confirm --allow-branch '*' + +# Dry run to check the release +# Workaround for unpublished dependency version errors: https://github.com/crate-ci/cargo-release/issues/691 +# TODO: check all crates after fixing these errors +cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad --exclude zebra-scan + +echo "Release process completed." diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 1b929324619..e804158cd12 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -1,3 +1,9 @@ +# This workflow automates the building and pushing of Docker images based on user-defined inputs. It includes: +# - Accepting various inputs like image name, Dockerfile path, target, and additional Rust-related parameters. +# - Authenticates with Google Cloud and logs into Google Artifact Registry and DockerHub. +# - Uses Docker Buildx for improved build performance and caching. +# - Builds the Docker image and pushes it to both Google Artifact Registry and potentially DockerHub, depending on release type. +# - Manages caching strategies to optimize build times across different branches. 
name: Build docker image on: @@ -33,6 +39,10 @@ on: test_features: required: false type: string + latest_tag: + required: false + type: boolean + default: false tag_suffix: required: false type: string @@ -79,7 +89,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v5.0.0 + uses: docker/metadata-action@v5.5.1 with: # list of Docker images to use as base name for tags images: | @@ -88,19 +98,25 @@ jobs: # appends inputs.tag_suffix to image tags/names flavor: | suffix=${{ inputs.tag_suffix }} + latest=${{ inputs.latest_tag }} # generate Docker tags based on the following events/attributes tags: | - type=schedule - # semver and ref,tag automatically add a "latest" tag, but only on stable releases + # These DockerHub release tags support the following use cases: + # - `latest`: always use the latest Zebra release when you pull or update + # - `v1.x.y` or `1.x.y`: always use the exact version, don't automatically upgrade + # + # `semver` adds a "latest" tag if `inputs.latest_tag` is `true`. type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} type=ref,event=tag - type=ref,event=branch + # DockerHub release and CI tags. + # This tag makes sure tests are using exactly the right image, even when multiple PRs run at the same time. + type=sha,event=push + # These CI-only tags support CI on PRs, the main branch, and scheduled full syncs. + # These tags do not appear on DockerHub, because DockerHub images are only published on the release event. type=ref,event=pr - type=sha - # edge is the latest commit on the default branch. + type=ref,event=branch type=edge,enable={{is_default_branch}} + type=schedule # Setup Docker Buildx to allow use of docker cache layers from GH - name: Set up Docker Buildx @@ -109,7 +125,7 @@ jobs: - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: retries: '3' workload_identity_provider: '${{ vars.GCP_WIF }}' @@ -139,7 +155,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v5.0.0 + uses: docker/build-push-action@v5.1.0 with: target: ${{ inputs.dockerfile_target }} context: . 
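With `latest_tag` enabled, a stable release ends up pullable under both a floating and a pinned tag. A sketch of the resulting DockerHub usage for a `v1.6.0` release (the `zfnd/zebra` image name is an assumption, since the `images:` input to the meta step above is templated):

```sh
docker pull zfnd/zebra:latest   # always the latest Zebra release
docker pull zfnd/zebra:1.6.0    # pin the exact version, never auto-upgrade
docker pull zfnd/zebra:v1.6.0   # the same version, via the git tag name
```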
diff --git a/.github/workflows/sub-build-lightwalletd.yml b/.github/workflows/sub-build-lightwalletd.yml index 513b4bd1cd9..258a2f0052f 100644 --- a/.github/workflows/sub-build-lightwalletd.yml +++ b/.github/workflows/sub-build-lightwalletd.yml @@ -60,7 +60,7 @@ jobs: with: # Note: check service.proto when modifying lightwalletd repo repository: zcash/lightwalletd - ref: 'master' + ref: 'v0.4.16' persist-credentials: false - uses: actions/checkout@v4.1.1 @@ -76,7 +76,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v5.0.0 + uses: docker/metadata-action@v5.5.1 with: # list of Docker images to use as base name for tags images: | @@ -111,7 +111,7 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: retries: '3' workload_identity_provider: '${{ vars.GCP_WIF }}' @@ -119,7 +119,7 @@ jobs: token_format: 'access_token' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 + uses: google-github-actions/setup-gcloud@v2.1.0 - name: Login to Google Artifact Registry uses: docker/login-action@v3.0.0 @@ -131,7 +131,7 @@ jobs: # Build and push image to Google Artifact Registry - name: Build & push id: docker_build - uses: docker/build-push-action@v5.0.0 + uses: docker/build-push-action@v5.1.0 with: target: build context: . diff --git a/.github/workflows/sub-build-zcash-params.yml b/.github/workflows/sub-build-zcash-params.yml deleted file mode 100644 index ee08b6fd3c2..00000000000 --- a/.github/workflows/sub-build-zcash-params.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Build zcash-params - -# Ensures that only one workflow task will run at a time. Previous deployments, if -# already in process, won't get cancelled. 
Instead, we let the first to complete -# then queue the latest pending workflow, cancelling any workflows in between -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false - -on: - workflow_dispatch: - inputs: - no_cache: - description: 'Disable the Docker cache for this build' - required: false - type: boolean - default: false - - push: - branches: - - 'main' - paths: - # parameter download code - - 'zebra-consensus/src/primitives/groth16/params.rs' - - 'zebra-consensus/src/router.rs' - - 'zebrad/src/commands/download.rs' - - 'zebrad/src/commands/start.rs' - # workflow definitions - - 'docker/zcash-params/Dockerfile' - - '.dockerignore' - - '.github/workflows/sub-build-zcash-params.yml' - - '.github/workflows/sub-build-docker-image.yml' - -jobs: - build: - name: Build Zcash Params Docker - uses: ./.github/workflows/sub-build-docker-image.yml - with: - dockerfile_path: ./docker/zcash-params/Dockerfile - dockerfile_target: release - image_name: zcash-params - no_cache: ${{ inputs.no_cache || false }} - rust_backtrace: full - rust_lib_backtrace: full - rust_log: info diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index fd5dd6c4394..e132f3ac8d6 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -136,7 +136,7 @@ jobs: # Install our SSH secret - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.6.1 + uses: shimataro/ssh-key-action@v2.7.0 with: key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} name: google_compute_engine @@ -150,14 +150,14 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: retries: '3' workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 + uses: google-github-actions/setup-gcloud@v2.1.0 # Find a cached state disk for this job, matching all of: # - disk cached state (lwd_state_dir/zebra_state_dir or disk_prefix) - zebrad-cache or lwd-cache @@ -183,74 +183,22 @@ jobs: - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }} + env: + GITHUB_SHA_SHORT: ${{ env.GITHUB_SHA_SHORT }} + NEEDS_LWD_STATE: ${{ inputs.needs_lwd_state }} + LWD_STATE_DIR: ${{ inputs.lwd_state_dir }} + ZEBRA_STATE_DIR: ${{ inputs.zebra_state_dir }} + DISK_PREFIX: ${{ inputs.disk_prefix }} + NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input + DISK_SUFFIX: ${{ inputs.disk_suffix }} + PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }} run: | - set -x - LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) - echo "STATE_VERSION: $LOCAL_STATE_VERSION" + source ./.github/workflows/scripts/gcp-get-cached-disks.sh + echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}" + echo "STATE_VERSION=${LOCAL_STATE_VERSION}" >> "${GITHUB_ENV}" + echo "CACHED_DISK_NAME=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}" + echo "DISK_OPTION=image=$CACHED_DISK_NAME," >> "${GITHUB_ENV}" - if [[ "${{ inputs.needs_lwd_state }}" == "true" ]]; then - DISK_PREFIX=${{ inputs.lwd_state_dir }} - else - DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }} - fi - - # Try to find an image generated 
from a previous step or run of this commit. - # Fields are listed in the "Create image from state disk" step. - # - # We don't want to match the full branch name here, because: - # - we want to ignore the different GITHUB_REFs across manually triggered jobs, - # pushed branches, and PRs, - # - previous commits might have been buggy, - # or they might have worked and hide bugs in this commit - # (we can't avoid this issue entirely, but we don't want to make it more likely), and - # - the branch name might have been shortened for the image. - # - # The probability of two matching short commit hashes within the same month is very low. - COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" - COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME" - if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Try to find an image generated from the main branch - MAIN_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "main Disk: $MAIN_CACHED_DISK_NAME" - if [[ -n "$MAIN_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $MAIN_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Try to find an image generated from any other branch - ANY_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "any branch Disk: $ANY_CACHED_DISK_NAME" - if [[ -n "$ANY_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $ANY_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Select a cached disk based on the job settings - CACHED_DISK_NAME="$COMMIT_CACHED_DISK_NAME" - if [[ -z "$CACHED_DISK_NAME" ]] && [[ "${{ inputs.prefer_main_cached_state }}" == "true" ]]; then - echo "Preferring main branch cached state to other branches..." 
- CACHED_DISK_NAME="$MAIN_CACHED_DISK_NAME" - fi - if [[ -z "$CACHED_DISK_NAME" ]]; then - CACHED_DISK_NAME="$ANY_CACHED_DISK_NAME" - fi - - if [[ -z "$CACHED_DISK_NAME" ]]; then - echo "No cached state disk available" - echo "Expected ${COMMIT_DISK_PREFIX}" - echo "Also searched for cached disks from other branches" - echo "Cached state test jobs must depend on the cached state rebuild job" - exit 1 - fi - - echo "Selected Disk: $CACHED_DISK_NAME" - echo "cached_disk_name=$CACHED_DISK_NAME" >> "$GITHUB_OUTPUT" - - echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" - echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV" - echo "DISK_OPTION=image=$CACHED_DISK_NAME," >> "$GITHUB_ENV" # Create a Compute Engine virtual machine and attach a cached state disk using the # $CACHED_DISK_NAME variable as the source image to populate the disk cached state @@ -485,7 +433,7 @@ jobs: # Install our SSH secret - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.6.1 + uses: shimataro/ssh-key-action@v2.7.0 with: key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} name: google_compute_engine @@ -499,13 +447,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 + uses: google-github-actions/setup-gcloud@v2.1.0 # Get the state version from the local constants.rs file to be used in the image creation, # as the state version is part of the disk image name. @@ -776,13 +724,13 @@ jobs: # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth - uses: google-github-actions/auth@v1.1.1 + uses: google-github-actions/auth@v2.1.1 with: workload_identity_provider: '${{ vars.GCP_WIF }}' service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 + uses: google-github-actions/setup-gcloud@v2.1.0 # Deletes the instances that has been recently deployed in the actual commit after all # previous jobs have run, no matter the outcome of the job. diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index 3d0182c45a8..35a48194097 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -1,3 +1,10 @@ +# Check if Cached State Disks Exist Workflow +# This workflow is designed to check the availability of cached state disks in Google Cloud Platform (GCP) for different types of Zcash applications. +# - Accepts network type as input to determine which disks to search for. +# - Checks for the existence of three types of disks: lightwalletd tip, Zebra tip, and Zebra checkpoint. +# - Uses Google Cloud SDK to query and identify available disks based on network and version. +# - Outputs the availability of each disk type, which can be utilized in subsequent workflows. +# The workflow streamlines the process of verifying disk availability, crucial for optimizing and speeding up integration tests and deployments. 
 name: Check if cached state disks exist
 
 on:
@@ -38,14 +45,14 @@ jobs:
       # Setup gcloud CLI
       - name: Authenticate to Google Cloud
         id: auth
-        uses: google-github-actions/auth@v1.1.1
+        uses: google-github-actions/auth@v2.1.1
         with:
           retries: '3'
           workload_identity_provider: '${{ vars.GCP_WIF }}'
           service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
 
       - name: Set up Cloud SDK
-        uses: google-github-actions/setup-gcloud@v1.1.1
+        uses: google-github-actions/setup-gcloud@v2.1.0
 
       # Disk images in GCP are required to be in lowercase, but the blockchain network
       # uses sentence case, so we need to downcase ${{ inputs.network }}
@@ -57,20 +64,17 @@
           echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV
 
       # Check if there are cached state disks available for subsequent jobs to use.
-      #
-      # This lookup uses the state version from constants.rs.
-      # It accepts disks generated by any branch, including draft and unmerged PRs.
-      #
-      # If the disk exists, sets the corresponding output to "true":
-      # - lwd_tip_disk
-      # - zebra_tip_disk
-      # - zebra_checkpoint_disk
      - name: Check if cached state disks exist
        id: get-available-disks
+        env:
+          GITHUB_WORKSPACE: ${{ env.GITHUB_WORKSPACE }}
+          NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input
+        # TODO: Use the `gcp-get-available-disks.sh` script instead of the inline script,
+        # as this is crashing. And it might be related to the returned JSON values.
        run: |
+          # ./.github/workflows/scripts/gcp-get-available-disks.sh
          LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
          echo "STATE_VERSION: $LOCAL_STATE_VERSION"
-
          LWD_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~lwd-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
          if [[ -z "$LWD_TIP_DISK" ]]; then
            echo "No TIP disk found for lightwalletd on network: ${NETWORK}"
@@ -79,7 +83,6 @@ jobs:
            echo "Disk: $LWD_TIP_DISK"
            echo "lwd_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
          fi
-
          ZEBRA_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
          if [[ -z "$ZEBRA_TIP_DISK" ]]; then
            echo "No TIP disk found for Zebra on network: ${NETWORK}"
@@ -88,7 +91,6 @@ jobs:
            echo "Disk: $ZEBRA_TIP_DISK"
            echo "zebra_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
          fi
-
          ZEBRA_CHECKPOINT_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-checkpoint" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
          if [[ -z "$ZEBRA_CHECKPOINT_DISK" ]]; then
            echo "No CHECKPOINT disk found for Zebra on network: ${NETWORK}"
diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml
index 6ed561cbd7e..85d2318dfa0 100644
--- a/.github/workflows/sub-test-zebra-config.yml
+++ b/.github/workflows/sub-test-zebra-config.yml
@@ -1,3 +1,7 @@
+# This workflow is designed to test Zebra configuration files using Docker containers.
+# - Runs a specified Docker image with the provided test variables and network settings.
+# - Monitors and analyzes container logs for specific patterns to determine test success.
+# - Provides flexibility in testing various configurations and networks by dynamically adjusting input parameters.
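+#
+# Roughly, the log-grep technique boils down to (a sketch; the image tag and
+# container name are placeholders):
+#
+#   docker run --detach --name zebra-conf-test -e NETWORK \
+#     -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml" zebra:test
+#   docker logs zebra-conf-test | grep -e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"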
name: Test Zebra Config Files on: @@ -32,7 +36,7 @@ jobs: test-docker-config: name: Test ${{ inputs.test_id }} in Docker timeout-minutes: 30 - runs-on: ubuntu-latest + runs-on: ubuntu-latest-m steps: - uses: actions/checkout@v4.1.1 with: @@ -69,7 +73,15 @@ jobs: # If grep found the pattern, exit with the Docker container exit status if [ $LOGS_EXIT_STATUS -eq 0 ]; then - exit $EXIT_STATUS; + # We can't diagnose or fix these errors, so we're just ignoring them for now. + # They don't actually impact the test because they happen after it succeeds. + # See ticket #7898 for details. + if [ $EXIT_STATUS -eq 137 ] || [ $EXIT_STATUS -eq 139 ]; then + echo "Warning: ignoring docker exit status $EXIT_STATUS"; + exit 0; + else + exit $EXIT_STATUS; + fi fi # Handle other potential errors here diff --git a/.gitignore b/.gitignore index ef29d45439f..b4af6c0b81e 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,8 @@ .zebra-state/ # Nix configs shell.nix +# Docker compose env files +*.env # ---- Below here this is an autogenerated .gitignore using Toptal ---- # Created by https://www.toptal.com/developers/gitignore/api/firebase,emacs,visualstudiocode,rust,windows,macos diff --git a/CHANGELOG.md b/CHANGELOG.md index d46c3ec28a6..6d9a7b8c3c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,13 +5,239 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). -## [Zebra 1.4.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.4.0) - TODO: DATE +## [Zebra 1.6.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.6.0) - 2024-02-23 -Zebra's mining RPCs are now available in release builds. TODO: rest of intro +This release exposes the shielded scanning functionality through an initial +version of a gRPC server, documented in the [Zebra +Book](https://zebra.zfnd.org/user/shielded-scan-grpc-server.html). -This release contains the following changes: +> [!NOTE] +> Building Zebra now depends on +> [`protoc`](https://github.com/protocolbuffers/protobuf). See the [Build +> Instructions](https://github.com/ZcashFoundation/zebra?tab=readme-ov-file#building-zebra) +> for more details. 
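+> A quick way to satisfy the new dependency (a sketch; these are the common
+> distribution package names, not taken from the Build Instructions):
+>
+> ```sh
+> sudo apt-get install -y protobuf-compiler   # Debian/Ubuntu
+> brew install protobuf                       # macOS
+> protoc --version
+> ```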
-### Mining RPCs in Production Builds +### Added + +- Add `docker-compose` file to run CI locally ([#8209](https://github.com/ZcashFoundation/zebra/pull/8209)) +- Allow users to use Zebra + LWD with persistent states ([#8215](https://github.com/ZcashFoundation/zebra/pull/8215)) + +#### Scanner + +- Add a new `zebra-grpc` crate ([#8167](https://github.com/ZcashFoundation/zebra/pull/8167)) +- Start scanner gRPC server with `zebrad` ([#8241](https://github.com/ZcashFoundation/zebra/pull/8241)) +- Add gRPC server reflection and document how to use the gRPC server ([#8288](https://github.com/ZcashFoundation/zebra/pull/8288)) +- Add the `GetInfo` gRPC method ([#8178](https://github.com/ZcashFoundation/zebra/pull/8178)) +- Add the `GetResults` gRPC method ([#8255](https://github.com/ZcashFoundation/zebra/pull/8255)) +- Add the `Scan` gRPC method ([#8268](https://github.com/ZcashFoundation/zebra/pull/8268), [#8303](https://github.com/ZcashFoundation/zebra/pull/8303)) +- Add the `RegisterKeys` gRPC method ([#8266](https://github.com/ZcashFoundation/zebra/pull/8266)) +- Add the `ClearResults` and `DeleteKeys` gRPC methods ([#8237](https://github.com/ZcashFoundation/zebra/pull/8237)) +- Add snapshot tests for new gRPCs ([#8277](https://github.com/ZcashFoundation/zebra/pull/8277)) +- Add unit tests for new gRPCs ([#8293](https://github.com/ZcashFoundation/zebra/pull/8293)) +- Create a tower Service in `zebra-scan` ([#8185](https://github.com/ZcashFoundation/zebra/pull/8185)) +- Implement the `SubscribeResults` scan service request ([#8253](https://github.com/ZcashFoundation/zebra/pull/8253)) +- Implement the `ClearResults` scan service request ([#8219](https://github.com/ZcashFoundation/zebra/pull/8219)) +- Implement the `DeleteKeys` scan service request ([#8217](https://github.com/ZcashFoundation/zebra/pull/8217)) +- Implement the `RegisterKeys` scan service request ([#8251](https://github.com/ZcashFoundation/zebra/pull/8251)) +- Implement the `Results` scan service request ([#8224](https://github.com/ZcashFoundation/zebra/pull/8224)) +- Test the `RegisterKeys` scan service request ([#8281](https://github.com/ZcashFoundation/zebra/pull/8281)) +- Add `ViewingKey` type in `zebra-chain` ([#8198](https://github.com/ZcashFoundation/zebra/pull/8198)) +- Handle `RegisterKeys` messages in scan task ([#8222](https://github.com/ZcashFoundation/zebra/pull/8222)) + +### Changed + +- Remove `rfc.md` file ([#8228](https://github.com/ZcashFoundation/zebra/pull/8228)) +- Update Debian from Bullseye to Bookworm in Docker ([#8273](https://github.com/ZcashFoundation/zebra/pull/8273)) +- Remove Zebra RFCs from `CONTRIBUTING.md` ([#8304](https://github.com/ZcashFoundation/zebra/pull/8304)) +- Publish fewer tags in Docker Hub ([#8300](https://github.com/ZcashFoundation/zebra/pull/8300)) +- Add Zebra crate versions to dev-dependencies and remove circular dev-dependencies ([#8171](https://github.com/ZcashFoundation/zebra/pull/8171)) +- Update docs for building Zebra ([#8315](https://github.com/ZcashFoundation/zebra/pull/8315)) + +### Fixed + +- Set log rotation to avoid docker bugs ([#8269](https://github.com/ZcashFoundation/zebra/pull/8269)) +- Improve error message in `non_blocking_logger` test ([#8276](https://github.com/ZcashFoundation/zebra/pull/8276)) + +### Contributors + +Thank you to everyone who contributed 
to this release, we couldn't make Zebra without you: +@arya2, @bishopcheckmate, @chairulakmal, @gustavovalverde, @mpguerra, @oxarbitrage and @upbqdn. + +## [Zebra 1.5.2](https://github.com/ZcashFoundation/zebra/releases/tag/v1.5.2) - 2024-01-23 + +This release serves as a hotfix for version 1.5.1, addressing issues encountered after its initial release. For more information about version 1.5.1, refer to [this link](https://github.com/ZcashFoundation/zebra/releases/tag/v1.5.1). + +Following the release on GitHub, we identified difficulties in publishing the `zebra-chain` crate to crates.io. Detailed information is available in [issue #8180](https://github.com/ZcashFoundation/zebra/issues/8180) and its references. + +Unfortunately, to resolve this challenge, which involves an unpublished dependency, we had to temporarily remove the internal miner support introduced in version 1.5.1. + +In our efforts to reinstate this feature, we've opened a tracking ticket to monitor the progress of the required code that must be merged into the `equihash` dependency. You can follow the developments in [issue #8183](https://github.com/ZcashFoundation/zebra/issues/8183), which will only be closed once the feature is successfully restored. + +### Breaking Changes + +- Temporarily remove the internal miner functionality ([#8184](https://github.com/ZcashFoundation/zebra/pull/8184)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@oxarbitrage + + +## [Zebra 1.5.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.5.1) - 2024-01-18 + +This release: + +- Adds a utility for reading scanning results, and finalizes the MVP features of the scanner. +- Adds an experimental `internal-miner` feature, which mines blocks within `zebrad`. This feature is only supported on testnet. Use a more efficient GPU or ASIC for mainnet mining. (A configuration sketch follows this list.) +- Contains many documentation improvements.
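Since the internal miner was introduced in 1.5.1 and removed again in the 1.5.2 hotfix, a rough sketch of how it was enabled may be useful context. The compile-time feature flag, the config path, the field names, and the placeholder testnet address below are all assumptions; check the zebrad mining docs for the exact format:

```sh
# Build with the experimental feature (compile-time flag assumed for this sketch):
cargo install --locked --features internal-miner zebrad

# Point zebrad at testnet and set a miner address
# (default Linux config path assumed; adjust for your platform):
cat >> ~/.config/zebrad.toml <<'EOF'
[network]
network = "Testnet"

[mining]
# Hypothetical placeholder address; use your own testnet address.
miner_address = "t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v"
internal_miner = true
EOF
```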
+ +### Added + +- Add an internal Zcash miner to Zebra ([#8136](https://github.com/ZcashFoundation/zebra/pull/8136), [#8150](https://github.com/ZcashFoundation/zebra/pull/8150)) +- Blockchain scanner new features: + - Don't scan and log if we are below sapling height ([#8121](https://github.com/ZcashFoundation/zebra/pull/8121)) + - Restart scanning where it left off ([#8080](https://github.com/ZcashFoundation/zebra/pull/8080)) + - Add scanning result reader utility ([#8104](https://github.com/ZcashFoundation/zebra/pull/8104), [#8157](https://github.com/ZcashFoundation/zebra/pull/8157)) +- Note default path to config in docs ([#8143](https://github.com/ZcashFoundation/zebra/pull/8143)) +- Document how to add a column family ([#8149](https://github.com/ZcashFoundation/zebra/pull/8149)) + +### Changed + +- Make sure scanner database is accessed using the correct types ([#8112](https://github.com/ZcashFoundation/zebra/pull/8112)) +- Move history tree and value balance to typed column families ([#8115](https://github.com/ZcashFoundation/zebra/pull/8115)) +- Refactor the user documentation for scanning ([#8124](https://github.com/ZcashFoundation/zebra/pull/8124)) +- Refactor user & dev documentation ([#8145](https://github.com/ZcashFoundation/zebra/pull/8145)) +- Improve feature flag docs ([#8114](https://github.com/ZcashFoundation/zebra/pull/8114)) +- Allow opening the database in a read-only mode ([#8079](https://github.com/ZcashFoundation/zebra/pull/8079)) +- Send all zebrad logs to the journal under systemd ([#7965](https://github.com/ZcashFoundation/zebra/pull/7965)) + +### Fixed + +- Point to a manually created list of Zebra crates in docs ([#8160](https://github.com/ZcashFoundation/zebra/pull/8160)) +- Add shielded-scan.md to the index ([#8095](https://github.com/ZcashFoundation/zebra/pull/8095)) +- Elasticsearch feature, make bulk size the same for testnet and mainnet ([#8127](https://github.com/ZcashFoundation/zebra/pull/8127)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @bishopcheckmate, @gustavovalverde, @oxarbitrage, @sandakersmann, @teor2345 and @upbqdn + + +## [Zebra 1.5.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.5.0) - 2023-11-28 + +This release: +- fixes a panic that was introduced in Zebra v1.4.0, which happens in rare circumstances when reading cached sprout or history trees. +- further improves how Zebra recovers from network interruptions and prevents potential network hangs. +- limits the ability of synthetic nodes to spread throughout the network through Zebra, addressing some of the findings of the Ziggurat red team report. + +As of this release, Zebra requires Rust 1.73 to build. + +Finally, we've added an experimental "shielded-scan" feature and the zebra-scan crate as steps +towards supporting shielded scanning in Zebra. This feature has known security issues. +It is for experimental use only. Ongoing development is tracked in issue [#7728](https://github.com/ZcashFoundation/zebra/issues/7728). + +### Important Security Warning + +Do not use regular or sensitive viewing keys with Zebra's experimental scanning feature. Do not use this +feature on a shared machine. We suggest generating new keys for experimental use.
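For context, the viewing-key configuration added in this release looks roughly like the sketch below. The `[shielded_scan]` table shape, the `sapling_keys_to_scan` field, and the key-to-birthday-height mapping are assumptions based on the config work listed above; the key string and height are placeholders, and per the warning above the key should be a throwaway experimental one:

```sh
# Default Linux config path assumed; adjust for your platform.
cat >> ~/.config/zebrad.toml <<'EOF'
[shielded_scan.sapling_keys_to_scan]
# Map a throwaway Sapling viewing key to its birthday height
# (both values are placeholders for this sketch):
"zxviews1q...placeholder..." = 419200
EOF
```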
+ +### Security + +- security(net): Stop sending peer addresses from version messages directly to the address book ([#7977](https://github.com/ZcashFoundation/zebra/pull/7977)) +- security(net): Limit how many addresses are sent directly to the address book for a single peer address message ([#7952](https://github.com/ZcashFoundation/zebra/pull/7952)) +- security(net): Rate-limit GetAddr responses to avoid sharing the entire address book over a short period ([#7955](https://github.com/ZcashFoundation/zebra/pull/7955)) + +### Added + +- feat(config): Add config field for the viewing keys used by zebra-scan ([#7949](https://github.com/ZcashFoundation/zebra/pull/7949)) +- feat(scan): Add on-disk database to store keys and scan results ([#7942](https://github.com/ZcashFoundation/zebra/pull/7942), [#8036](https://github.com/ZcashFoundation/zebra/pull/8036)) +- feat(scan): Spawn zebra-scan task from zebrad with configured viewing keys ([#7989](https://github.com/ZcashFoundation/zebra/pull/7989)) +- feat(scan): Create a scan_block function to use across scanning tasks ([#7994](https://github.com/ZcashFoundation/zebra/pull/7994)) +- feat(scan): Scan blocks with Sapling keys and write the results to the database ([#8040](https://github.com/ZcashFoundation/zebra/pull/8040)) +- poc(scan): Proof of concept for shielded scanning ([#7758](https://github.com/ZcashFoundation/zebra/pull/7758)) +- add(docker): Add `ldb` RocksDB query tool to the Dockerfile ([#8074](https://github.com/ZcashFoundation/zebra/pull/8074)) + +### Changed + +- change(state): Expose ZebraDb methods that can create different kinds of databases ([#8002](https://github.com/ZcashFoundation/zebra/pull/8002)) +- change(state): Make the types for finalized blocks consistent ([#7923](https://github.com/ZcashFoundation/zebra/pull/7923)) +- change(scan): Create a scanner storage database ([#8031](https://github.com/ZcashFoundation/zebra/pull/8031)) +- change(scan): Store scanned TXIDs in "display order" ([#8057](https://github.com/ZcashFoundation/zebra/pull/8057)) +- change(scan): Create a function that scans one block by height, and stores the results in the database ([#8045](https://github.com/ZcashFoundation/zebra/pull/8045)) +- change(scan): Store one transaction ID per database row, to make queries easier ([#8062](https://github.com/ZcashFoundation/zebra/pull/8062)) +- change(log): Silence verbose failed connection logs ([#8072](https://github.com/ZcashFoundation/zebra/pull/8072)) + +### Fixed + +- fix(db): Fix a sprout/history tree read panic in Zebra v1.4.0, which only happens before the 25.3.0 state upgrade completes ([#7972](https://github.com/ZcashFoundation/zebra/pull/7972)) +- fix(net): Fix potential network hangs, and reduce code complexity ([#7859](https://github.com/ZcashFoundation/zebra/pull/7859)) +- fix(scan): Start scanning task only if there are keys to scan ([#8059](https://github.com/ZcashFoundation/zebra/pull/8059)) +- fix(rpc): Make the `verbose` argument of the `getrawtransaction` RPC optional ([#8076](https://github.com/ZcashFoundation/zebra/pull/8076)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @oxarbitrage, @teor2345 and @upbqdn + +## [Zebra 1.4.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.4.0) - 2023-11-07 + +Zebra's mining RPCs are now available in release 
builds. Our Docker images are significantly +smaller, because the smaller Zcash verification parameters are now built into the `zebrad` binary. +Zebra has updated to the shared Rust dependencies from the `zcashd` 5.7.0 release. + +Zebra recovers better from brief network interruptions, and avoids some network and verification +denial of service and performance issues. We have restored our macOS tests in CI, and now support +macOS on a best-effort basis. + +We have changed our documentation website URL, and we are considering deprecating some Docker image +tags in release 1.5.0 and later. + +### Deprecation Warnings + +This release has the following deprecation warnings: + +#### Warning: Deprecation of DockerHub Image Tags in a future release + +Zebra currently publishes 11 [DockerHub tags](https://hub.docker.com/r/zfnd/zebra/tags) for each new release. +We want to reduce the number of DockerHub tags we publish in a future minor Zebra release. + +Based on usage and user feedback, we could stop publishing: +- The `1` tag, which updates each release until NU6 +- The `1.x` tag, which updates each patch release until the next minor release +- The `1.x.y` tag, which is the same as `v1.x.y` +- The `sha-xxxxxxx` tag, which is the same as `v1.x.y` (for production releases) + +We also want to standardise experimental image tags to `-experimental`, rather than `.experimental`. + +So for release 1.5.0, we might only publish these tags: +- `latest` +- `latest-experimental` (a new tag) +- `v1.5.0` +- `v1.5.0-experimental` + +Please let us know if you need any other tags by [opening a GitHub ticket](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-enhancement%2CS-needs-triage&projects=&template=feature_request.yml&title=feature%3A+). + +We recommend using the `latest` tag to always get the most recent Zebra release. + +#### Warning: Documentation Website URL Change + +We have replaced the API documentation on the [doc.zebra.zfnd.org](https://doc.zebra.zfnd.org) +website with [docs.rs](https://docs.rs/releases/search?query=zebra). All links have been updated. + +Zebra's API documentation can be found on: +- [`docs.rs`](https://docs.rs/releases/search?query=zebra), which renders documentation for the + public API of the latest crate releases; +- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/), which renders + documentation for the internal API on the `main` branch. + +[doc.zebra.zfnd.org](https://doc.zebra.zfnd.org) stopped being updated a few days before this release, +and it will soon be shut down. + +### Significant Changes + +This release contains the following significant changes: + +#### Mining RPCs in Production Builds Zebra's mining RPCs are now available in release builds (#7740). Any Zebra instance can be used by a solo miner or mining pool. This stabilises 12 RPCs, including `getblocktemplate`, `submitblock`, @@ -21,10 +247,12 @@ read our [mining blog post](https://zfnd.org/experimental-mining-support-in-zebr Please [let us know](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-enhancement%2CS-needs-triage&projects=&template=feature_request.yml&title=feature%3A+) if your mining pool needs extra RPC methods or fields. -### Parameters in Binary +#### Zcash Parameters in `zebrad` Binary `zebrad` now bundles zk-SNARK parameters directly into its binary. This increases the binary size -by a few megabytes, but these parameters do not need to be downloaded or stored separately. 
+by a few megabytes, but reduces the size of the Docker image by around 600 MB because +the parameters don't contain the Sprout proving key anymore. The `zebrad download` +command does nothing, so it has been removed. Previously, parameters were stored by default in these locations: @@ -40,9 +268,44 @@ so it can't be used to retry failed downloads in `zebrad` 1.3.0 and earlier. We recommend upgrading to the latest Zebra release to avoid download issues in new installs. +#### macOS Support + +macOS x86_64 is now supported on a best-effort basis. macOS builds and some tests run in Zebra's CI. + ### Security -TODO: rest of changelog +- Reconnect with peers after brief network interruption ([#7853](https://github.com/ZcashFoundation/zebra/pull/7853)) +- Add outer timeouts for critical network operations to avoid hangs ([#7869](https://github.com/ZcashFoundation/zebra/pull/7869)) +- Set iterator read bounds where possible in DiskDb, to avoid a known RocksDB denial of service issue ([#7731](https://github.com/ZcashFoundation/zebra/pull/7731), [#7732](https://github.com/ZcashFoundation/zebra/pull/7732)) +- Fix concurrency issues in tree key formats, and CPU usage in genesis tree roots ([#7392](https://github.com/ZcashFoundation/zebra/pull/7392)) + +### Removed + +- Remove the `zebrad download` command, because it no longer does anything ([#7819](https://github.com/ZcashFoundation/zebra/pull/7819)) + +### Added + +- Enable mining RPCs by default in production builds ([#7740](https://github.com/ZcashFoundation/zebra/pull/7740)) +- Re-enable macOS builds and tests in CI ([#7834](https://github.com/ZcashFoundation/zebra/pull/7834)) +- Make macOS x86_64 a tier 2 supported platform in the docs ([#7843](https://github.com/ZcashFoundation/zebra/pull/7843)) +- Add macOS M1 as a tier 3 supported platform ([#7851](https://github.com/ZcashFoundation/zebra/pull/7851)) + +### Changed + +- Build Sprout and Sapling parameters into the zebrad binary, so a download server isn't needed ([#7800](https://github.com/ZcashFoundation/zebra/pull/7800), [#7844](https://github.com/ZcashFoundation/zebra/pull/7844)) +- Bump ECC dependencies for `zcashd` 5.7.0 ([#7784](https://github.com/ZcashFoundation/zebra/pull/7784)) +- Refactor the installation instructions for the `s-nomp` mining pool software ([#7835](https://github.com/ZcashFoundation/zebra/pull/7835)) + +### Fixed + +- Make the `latest` Docker tag point to the production build, rather than the build with experimental features ([#7817](https://github.com/ZcashFoundation/zebra/pull/7817)) +- Fix an incorrect consensus-critical ZIP 212 comment ([#7774](https://github.com/ZcashFoundation/zebra/pull/7774)) +- Fix broken links to `zebra_network` and `zebra_state` `Config` structs on doc-internal.zebra.zfnd.org ([#7838](https://github.com/ZcashFoundation/zebra/pull/7838)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @gustavovalverde, @mpguerra, @oxarbitrage, @rex4539, @teor2345, @upbqdn, and @vuittont60. 
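As a quick way to confirm the stabilised mining RPCs are reachable on an upgraded node, a call like the one below should return a block template. The RPC address is an assumption: Zebra only serves JSON-RPC when an `[rpc]` `listen_addr` is configured, and `getblocktemplate` also needs a configured `miner_address`:

```sh
# Assumes zebrad.toml contains something like:
#   [rpc]
#   listen_addr = "127.0.0.1:8232"
#   [mining]
#   miner_address = "t3..."   # placeholder
curl --silent --data-binary \
  '{"jsonrpc": "1.0", "id": "curltest", "method": "getblocktemplate", "params": []}' \
  -H 'content-type: application/json' http://127.0.0.1:8232/
```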
## [Zebra 1.3.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.3.0) - 2023-10-16 @@ -992,12 +1255,12 @@ When there are a lot of large user-generated transactions on the network, Zebra ### Configuration Changes -- Split the checkpoint and full verification [`sync` concurrency options](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html) (#4726, #4758): +- Split the checkpoint and full verification [`sync` concurrency options](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html) (#4726, #4758): - Add a new `full_verify_concurrency_limit` - Rename `max_concurrent_block_requests` to `download_concurrency_limit` - Rename `lookahead_limit` to `checkpoint_verify_concurrency_limit` For backwards compatibility, the old names are still accepted as aliases. -- Add a new `parallel_cpu_threads` [`sync` concurrency option](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html) (#4776). +- Add a new `parallel_cpu_threads` [`sync` concurrency option](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html) (#4776). This option sets the number of threads to use for CPU-bound tasks, such as proof and signature verification. By default, Zebra uses all available CPU cores. @@ -1534,7 +1797,7 @@ Zebra's latest beta fixes a `cargo install` build failure in the previous beta r #### Logging -- Log hashes as hex strings in block committment errors (#4021) +- Log hashes as hex strings in block commitment errors (#4021) #### Tests @@ -1576,7 +1839,7 @@ As part of the RPC changes, we made performance improvements to cached state acc #### Others - Added `TransactionsByMinedId` to mempool (#3907) -- Added code owners and automatic review assigment to the repository (#3677 #3708 #3718) +- Added code owners and automatic review assignment to the repository (#3677 #3708 #3718) - Validate ZIP-212 grace period blocks using checkpoints (#3889) - Store Sapling and Orchard note commitment trees in finalized and non-finalized state (#3818) - Get addresses from transparent outputs (#3802) @@ -2299,7 +2562,7 @@ Zebra's latest alpha continues our work on NU5, including Orchard and Transactio - Test consensus-critical Amount deserialization (#2487) - Update to use new GitHub action names in Google Cloud workflows (#2533) -- Add test intialization helper function for tests (#2539) +- Add test initialization helper function for tests (#2539) ### Changed @@ -2892,7 +3155,7 @@ Some notable changes include: ### Changed - Export new precompute api in zebra-script (#1493) -- Rewrite peer block request hander to match the zcashd implementation (#1518) +- Rewrite peer block request handler to match the zcashd implementation (#1518) ### Fixed - Avoid panics when there are multiple failures on the same connection (#1600) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d8ce532628..726eb26017c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,7 +3,6 @@ * [Running and Debugging](#running-and-debugging) * [Bug Reports](#bug-reports) * [Pull Requests](#pull-requests) -* [Zebra RFCs](#zebra-rfcs) ## Running and Debugging [running-and-debugging]: #running-and-debugging @@ -49,40 +48,3 @@ to the "re-run jobs" button on the `Coverage (+nightly)` CI job's tab To access a report download and extract the zip artifact then open the top level `index.html`. - -## Zebra RFCs -[zebra-rfcs]: #zebra-rfcs - -Significant changes to the Zebra codebase are planned using Zebra RFCs. 
These -allow structured discussion about a proposed change and provide a record of -the planned design. - -To make a Zebra RFC: - -1. Choose a short feature name like `my-feature`. - -2. Copy the `book/src/dev/rfcs/0000-template.md` file to -`book/src/dev/rfcs/drafts/xxxx-my-feature.md`. - -3. Edit the template header to add the feature name and the date, but leave -the other fields blank for now. - -4. Write the design! The template has a suggested list of sections that are a -useful guide. - -5. Create an design PR using the RFC template. - -6. After creating an RFC PR, update the RFC header and the PR description -with the PR number. - -7. Make changes to the RFC in collaboration with the Zebra team. - -8. When the RFC is merged, take the next available RFC number (not conflicting -with any existing RFCs or design PRs) and name the RFC file accordingly, e.g., -`0027-my-feature.md` for number 27. - -9. Make sure that `book/src/SUMMARY.md` links to the new number for the RFC. - -10. After the RFC is accepted, create an issue for the implementation of the -design, and update the RFC header and PR description with the implementation -issue number. diff --git a/Cargo.lock b/Cargo.lock index 4e3c6484003..95de4bcf116 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,18 +12,18 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.4.6", + "clap 4.5.1", "color-eyre", "fs-err", "once_cell", "regex", "secrecy", - "semver 1.0.20", + "semver 1.0.22", "serde", "termcolor", "toml 0.5.11", "tracing", - "tracing-log", + "tracing-log 0.1.4", "tracing-subscriber", "wait-timeout", ] @@ -68,9 +68,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if 1.0.0", "cipher", @@ -79,14 +79,15 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if 1.0.0", - "getrandom 0.2.10", + "getrandom 0.2.12", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -136,9 +137,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ "anstyle", "anstyle-parse", @@ -150,43 +151,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" 
+version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "arc-swap" @@ -208,9 +209,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2" +checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" dependencies = [ "flate2", "futures-core", @@ -238,18 +239,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -343,9 +344,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -391,24 +392,24 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.65.1" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", "log", - "peeking_take_while", "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.50", "which", ] @@ -449,9 +450,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitflags-serde-legacy" @@ -459,7 +460,7 @@ version = "0.1.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "serde", ] @@ -540,9 +541,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c79ad7fb2dd38f3dabd76b09c6a5a20c038fc0213ef1e9afd30eb777f120f019" +checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" dependencies = [ "memchr", "serde", @@ -550,9 +551,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "a3b1be7772ee4501dba05acbe66bb1e8760f6a6c474a36035631638e4415f130" [[package]] name = "byte-slice-cast" @@ -562,9 +563,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.14.0" +version = "1.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" +checksum = "a2ef034f05691a48569bd920a96c81b9d91bbad1ab5ac7c4616c1f6ef36cb79f" [[package]] name = "byteorder" @@ -589,12 +590,44 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "camino" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ + "serde", +] + [[package]] name = "canonical-path" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6e9e01327e6c86e92ec72b1c798d4a94810f147209bbe3ffab6a86954937a6f" +[[package]] +name = "cargo-platform" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.22", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "cast" version = "0.3.0" @@ -612,11 +645,10 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "7f9fa1897e4325be0d68d48df6aa1a71ac2ed4d27723887e7754192705350730" dependencies = [ - "jobserver", "libc", ] @@ -667,22 +699,22 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -691,15 +723,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -718,9 +750,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -744,9 +776,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.6" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" dependencies = [ "clap_builder", "clap_derive", @@ -754,33 +786,33 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.6" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.10.0", + "strsim 0.11.0", ] [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "codespan-reporting" @@ -803,19 +835,19 @@ dependencies = [ "eyre", "indenter", "once_cell", - "owo-colors", + "owo-colors 3.5.0", "tracing-error", "url", ] [[package]] name = "color-spantrace" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" +checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" dependencies = [ "once_cell", - "owo-colors", + "owo-colors 3.5.0", "tracing-core", "tracing-error", ] @@ -828,15 +860,15 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "console" -version = "0.15.7" +version = "0.15.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.45.0", + "windows-sys 0.52.0", ] [[package]] @@ -848,7 +880,7 @@ dependencies = [ "futures-core", "prost", "prost-types", - "tonic", + "tonic 0.10.2", "tracing-core", ] @@ -870,12 +902,18 @@ dependencies = [ "thread_local", "tokio", "tokio-stream", - "tonic", + "tonic 0.10.2", "tracing", "tracing-core", "tracing-subscriber", ] +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "constant_time_eq" version = "0.3.0" @@ -884,9 +922,9 @@ checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -894,24 +932,24 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if 1.0.0", ] @@ -925,7 +963,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.6", + "clap 4.5.1", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -954,46 +992,37 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if 1.0.0", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = 
"5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if 1.0.0", "crossbeam-utils", - "memoffset", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1013,9 +1042,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -1031,13 +1060,13 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -1054,14 +1083,14 @@ dependencies = [ [[package]] name = "cxx-gen" -version = "0.7.109" +version = "0.7.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1318697052dbc5a12f8e5e603441413d6096350c29b8361eb45a9c531be61dda" +checksum = "54b629c0d006c7e44c1444dd17d18a458c9390d32276b758ac7abd21a75c99b0" dependencies = [ "codespan-reporting", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -1078,7 +1107,7 @@ checksum = "2fa16a70dd58129e4dfffdff535fb1bce66673f7bbeec4a5a1765a504e1ccd84" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -1093,12 +1122,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +checksum = "c376d08ea6aa96aafe61237c7200d1241cb177b7d3a542d791f2d118e9cbb955" dependencies = [ - "darling_core 0.20.3", - "darling_macro 0.20.3", + "darling_core 0.20.6", + "darling_macro 0.20.6", ] [[package]] @@ -1117,16 +1146,16 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +checksum = "33043dcd19068b8192064c704b3f83eb464f91f1ff527b44a4e2b08d9cdb8855" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -1142,13 +1171,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.3" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +checksum = "c5a91391accf613803c2a9bf9abccdbaa07c54b4244a5b64883f9c3c137c86be" dependencies = [ - "darling_core 0.20.3", + "darling_core 0.20.6", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -1161,12 +1190,23 @@ dependencies = [ "uuid", ] +[[package]] +name = "der" +version = "0.7.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ + "powerfmt", "serde", ] @@ -1210,21 +1250,22 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "dyn-clone" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d2f3407d9a573d666de4b5bdf10569d73ca9478087346697dcbae6244bfbcd" +checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ + "pkcs8", "serde", "signature", ] @@ -1237,7 +1278,7 @@ checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ "curve25519-dalek", "ed25519", - "hashbrown 0.14.1", + "hashbrown 0.14.3", "hex", "rand_core 0.6.4", "serde", @@ -1247,9 +1288,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "elasticsearch" @@ -1314,19 +1355,19 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "eyre" -version = "0.6.8" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" dependencies = [ "indenter", "once_cell", @@ -1360,9 +1401,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" [[package]] name = "fixed-hash" @@ -1413,9 +1454,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" 
dependencies = [ "percent-encoding", ] @@ -1436,9 +1477,12 @@ dependencies = [ [[package]] name = "fs-err" -version = "2.9.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" +dependencies = [ + "autocfg", +] [[package]] name = "funty" @@ -1448,9 +1492,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1463,9 +1507,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1473,15 +1517,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1490,38 +1534,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = 
"3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1558,9 +1602,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -1571,17 +1615,17 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "git2" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf97ba92db08df386e10c8ede66a2a0369bd277090afd8710e19e38de9ec0cd" +checksum = "1b3ba52851e73b46a4c3df1d89343741112003f0f6f13beb0dfac9e457c3fdcd" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", "libgit2-sys", "log", @@ -1596,15 +1640,15 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" dependencies = [ "aho-corasick", "bstr", - "fnv", "log", - "regex", + "regex-automata 0.4.5", + "regex-syntax 0.8.2", ] [[package]] @@ -1621,9 +1665,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" dependencies = [ "bytes", "fnv", @@ -1631,18 +1675,22 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.2.3", "slab", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "tracing", ] [[package]] name = "half" -version = "1.8.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +dependencies = [ + "cfg-if 1.0.0", + "crunchy", +] [[package]] name = "halo2_gadgets" @@ -1692,18 +1740,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", @@ -1711,11 +1750,11 @@ dependencies = [ [[package]] name = "hdrhistogram" -version = "7.5.2" +version = "7.5.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" dependencies = [ - "base64 0.13.1", + "base64 0.21.7", "byteorder", "flate2", "nom", @@ -1730,7 +1769,7 @@ checksum = "5a03ba7d4c9ea41552cd4351965ff96883e629693ae85005c501bb4b9e1c48a7" dependencies = [ "lazy_static", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "secp256k1", "thiserror", ] @@ -1761,9 +1800,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" [[package]] name = "hex" @@ -1791,11 +1830,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1822,9 +1861,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -1833,9 +1872,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", @@ -1872,9 +1911,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -1887,7 +1926,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2", "tokio", "tower-service", "tracing", @@ -1896,9 +1935,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http", @@ -1922,16 +1961,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows", + "windows-core", ] [[package]] @@ -1951,9 +1990,9 @@ checksum = 
"b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2007,20 +2046,20 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown 0.14.3", "serde", ] [[package]] name = "indicatif" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" dependencies = [ "console", "instant", @@ -2031,9 +2070,9 @@ dependencies = [ [[package]] name = "inferno" -version = "0.11.17" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50453ec3a6555fad17b1cd1a80d16af5bc7cb35094f64e429fd46549018c6a3" +checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9" dependencies = [ "ahash", "is-terminal", @@ -2057,9 +2096,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.34.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d64600be34b2fcfc267740a243fa7744441bb4947a619ac4e5bb6507f35fbfc" +checksum = "7c985c1bef99cf13c58fade470483d81a2bfe846ebde60ed28cc2dddec2df9e2" dependencies = [ "console", "lazy_static", @@ -2083,19 +2122,19 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "hermit-abi 0.3.3", - "rustix", - "windows-sys 0.48.0", + "hermit-abi 0.3.6", + "libc", + "windows-sys 0.52.0", ] [[package]] @@ -2116,28 +2155,39 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] -name = "jobserver" -version = "0.1.27" +name = "js-sys" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" 
+checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ - "libc", + "wasm-bindgen", ] [[package]] -name = "js-sys" -version = "0.3.64" +name = "jsonrpc" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "a26d9104d516092f092d97448787505881fdb6518293b2d6500bf9c180c839dd" dependencies = [ - "wasm-bindgen", + "base64 0.13.1", + "serde", + "serde_json", ] [[package]] @@ -2217,11 +2267,11 @@ dependencies = [ [[package]] name = "known-folders" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b6f1427d9c43b1cce87434c4d9eca33f43bdbb6246a762aa823a582f74c1684" +checksum = "4397c789f2709d23cfcb703b316e0766a8d4b17db2d47b0ab096ef6047cae1d8" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2241,15 +2291,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libgit2-sys" -version = "0.16.1+1.7.1" +version = "0.16.2+1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2a2bb3680b094add03bb3732ec520ece34da31a8cd2d633d1389d0f0fb60d0c" +checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" dependencies = [ "cc", "libc", @@ -2259,12 +2309,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if 1.0.0", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -2273,11 +2323,22 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.2", + "libc", + "redox_syscall 0.4.1", +] + [[package]] name = "librocksdb-sys" -version = "0.11.0+8.1.1" +version = "0.16.0+8.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" dependencies = [ "bindgen", "bzip2-sys", @@ -2290,9 +2351,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" dependencies = [ "cc", "libc", @@ -2317,9 +2378,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -2347,15 +2408,6 @@ dependencies = [ "libc", ] -[[package]] -name = "mach2" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" -dependencies = [ - "libc", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -2389,18 +2441,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" - -[[package]] -name = "memoffset" -version = "0.9.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memuse" @@ -2422,17 +2465,27 @@ dependencies = [ "portable-atomic", ] +[[package]] +name = "metrics" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd71d9db2e4287c3407fa04378b8c2ee570aebe0854431562cdd89ca091854f4" +dependencies = [ + "ahash", + "portable-atomic", +] + [[package]] name = "metrics-exporter-prometheus" -version = "0.12.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" +checksum = "9bf4e7146e30ad172c42c39b3246864bd2d3c6396780711a1baf749cfe423e21" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "hyper", - "indexmap 1.9.3", + "indexmap 2.2.3", "ipnet", - "metrics", + "metrics 0.22.1", "metrics-util", "quanta", "thiserror", @@ -2441,25 +2494,25 @@ dependencies = [ [[package]] name = "metrics-macros" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" +checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "metrics-util" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" +checksum = "ece71ab046dcf45604e573329966ec1db5ff4b81cfa170a924ff4c959ab5451a" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.13.1", - "metrics", + "hashbrown 0.14.3", + "metrics 0.22.1", "num_cpus", "quanta", "sketches-ddsketch", @@ -2479,18 +2532,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2515,7 +2568,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.12", ] [[package]] @@ -2566,6 +2619,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-format" version = "0.4.4" @@ -2578,19 +2637,18 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -2602,15 +2660,15 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.3", + "hermit-abi 0.3.6", "libc", ] [[package]] name = "num_threads" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ "libc", ] @@ -2623,18 +2681,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -2715,6 +2773,12 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" +[[package]] +name = "owo-colors" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caff54706df99d2a78a5a4e3455ff45448d81ef1bb63c22cd14052ca0e993a3f" + [[package]] name = "pairing" version = "0.23.0" @@ -2726,9 +2790,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.5" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" dependencies = [ "arrayvec", "bitvec", @@ -2740,11 +2804,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.5" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 1.0.109", @@ -2768,7 +2832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.9", ] [[package]] @@ -2787,13 +2851,13 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "smallvec", "windows-targets 0.48.5", ] @@ -2834,23 +2898,17 @@ dependencies = [ "password-hash", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.4" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" +checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" dependencies = [ "memchr", "thiserror", @@ -2859,9 +2917,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.4" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" +checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" dependencies = [ "pest", "pest_generator", @@ -2869,22 +2927,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.4" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" +checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "pest_meta" -version = "2.7.4" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" +checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" dependencies = [ "once_cell", "pest", @@ -2898,27 +2956,27 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.0.2", + "indexmap 2.2.3", ] [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -2933,17 +2991,27 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" -version = "3.1.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "plotters" @@ -2986,9 +3054,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.4.3" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + +[[package]] +name = "powerfmt" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" @@ -2998,12 +3072,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -3028,12 +3102,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "once_cell", - "toml_edit 0.19.15", + "toml_edit 0.20.7", ] [[package]] @@ -3062,28 +3135,28 @@ 
dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.1", + "bitflags 2.4.2", "lazy_static", "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.7.5", + "regex-syntax 0.8.2", "rusty-fork", "tempfile", "unarray", @@ -3102,9 +3175,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" +checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" dependencies = [ "bytes", "prost-derive", @@ -3112,9 +3185,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" +checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2" dependencies = [ "bytes", "heck 0.4.1", @@ -3127,42 +3200,41 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.38", + "syn 2.0.50", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" +checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" dependencies = [ "anyhow", "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "prost-types" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" +checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" dependencies = [ "prost", ] [[package]] name = "quanta" -version = "0.11.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" dependencies = [ "crossbeam-utils", "libc", - "mach2", "once_cell", "raw-cpuid", "wasi 0.11.0+wasi-snapshot-preview1", @@ -3210,9 +3282,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -3282,7 +3354,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + 
"getrandom 0.2.12", ] [[package]] @@ -3305,18 +3377,18 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.7.0" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", ] [[package]] name = "rayon" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" dependencies = [ "either", "rayon-core", @@ -3324,9 +3396,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -3374,33 +3446,33 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ - "getrandom 0.2.10", - "redox_syscall 0.2.16", + "getrandom 0.2.12", + "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -3415,9 +3487,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -3430,12 +3502,6 @@ version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" -[[package]] -name = "regex-syntax" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" - [[package]] name = "regex-syntax" version = "0.8.2" @@ -3444,12 +3510,12 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum 
= "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ "async-compression", - "base64 0.21.4", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", @@ -3471,10 +3537,11 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-rustls", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "tower-service", "url", "wasm-bindgen", @@ -3486,9 +3553,9 @@ dependencies = [ [[package]] name = "rgb" -version = "0.8.36" +version = "0.8.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59" +checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8" dependencies = [ "bytemuck", ] @@ -3503,11 +3570,26 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if 1.0.0", + "getrandom 0.2.12", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.52.0", +] + [[package]] name = "ripemd" version = "0.1.3" @@ -3528,9 +3610,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" dependencies = [ "libc", "librocksdb-sys", @@ -3580,51 +3662,51 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.20", + "semver 1.0.22", ] [[package]] name = "rustix" -version = "0.38.20" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring", + "ring 0.17.8", "rustls-webpki", "sct", ] [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", ] [[package]] name = "rustls-webpki" -version = "0.101.6" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -3647,9 +3729,9 @@ dependencies = 
[ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "same-file" @@ -3668,12 +3750,12 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -3716,9 +3798,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] @@ -3731,9 +3813,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "sentry" -version = "0.31.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0097a48cd1999d983909f07cb03b15241c5af29e5e679379efac1c06296abecc" +checksum = "766448f12e44d68e675d5789a261515c46ac6ccd240abdd451a9c46c84a49523" dependencies = [ "httpdate", "reqwest", @@ -3749,9 +3831,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.31.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a7b80fa1dd6830a348d38a8d3a9761179047757b7dca29aef82db0118b9670" +checksum = "32701cad8b3c78101e1cd33039303154791b0ff22e7802ed8cc23212ef478b45" dependencies = [ "backtrace", "once_cell", @@ -3761,9 +3843,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7615dc588930f1fd2e721774f25844ae93add2dbe2d3c2f995ce5049af898147" +checksum = "17ddd2a91a13805bd8dab4ebf47323426f758c35f7bf24eacc1aded9668f3824" dependencies = [ "hostname", "libc", @@ -3775,9 +3857,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f51264e4013ed9b16558cce43917b983fa38170de2ca480349ceb57d71d6053" +checksum = "b1189f68d7e7e102ef7171adf75f83a59607fafd1a5eecc9dc06c026ff3bdec4" dependencies = [ "once_cell", "rand 0.8.5", @@ -3788,9 +3870,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.31.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38033822128e73f7b6ca74c1631cef8868890c6cb4008a291cf73530f87b4eac" +checksum = "3012699a9957d7f97047fd75d116e22d120668327db6e7c59824582e16e791b2" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3800,9 +3882,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e663b3eb62ddfc023c9cf5432daf5f1a4f6acb1df4d78dd80b740b32dd1a740" +checksum = "c7173fd594569091f68a7c37a886e202f4d0c1db1e1fa1d18a051ba695b2e2ec" dependencies = [ "debugid", "hex", 
@@ -3817,9 +3899,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.189" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -3835,22 +3917,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.2.3", "itoa", "ryu", "serde", @@ -3858,9 +3940,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -3889,18 +3971,19 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.4.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" +checksum = "15d167997bd841ec232f5b2b8e0e26606df2e7caa4c31b95ea9ca52b200bd270" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.2", + "indexmap 2.2.3", "serde", + "serde_derive", "serde_json", - "serde_with_macros 3.4.0", + "serde_with_macros 3.6.1", "time", ] @@ -3918,14 +4001,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.4.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" +checksum = "865f9743393e638991566a8b7a479043c2c8da94a33e0a31f18214c9cae0a64d" dependencies = [ - "darling 0.20.3", + "darling 0.20.6", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -3948,11 +4031,23 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shardtree" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c19f96dde3a8693874f7e7c53d95616569b4009379a903789efbd448f4ea9cc7" +dependencies = [ + "bitflags 2.4.2", + "either", + "incrementalmerkletree", + "tracing", +] + [[package]] name = "shlex" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" @@ -3965,21 +4060,24 @@ dependencies = [ [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core 0.6.4", +] [[package]] name = "similar" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" +checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" [[package]] name = "sketches-ddsketch" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" +checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" [[package]] name = "slab" @@ -3992,25 +4090,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -4053,6 +4141,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "static_assertions" version = "1.1.0" @@ -4077,6 +4175,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" + [[package]] name = "structopt" version = "0.3.26" @@ -4120,9 +4224,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -4176,22 +4280,21 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.8.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "termcolor" -version = "1.3.0" +version = "1.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -4207,29 +4310,43 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", +] + +[[package]] +name = "thread-priority" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b72cb4958060ee2d9540cef68bb3871fd1e547037772c7fe7650d5d1cbec53b3" +dependencies = [ + "bitflags 1.3.2", + "cfg-if 1.0.0", + "libc", + "log", + "rustversion", + "winapi", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -4237,14 +4354,16 @@ dependencies = [ [[package]] name = "time" -version = "0.3.29" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", "libc", + "num-conv", "num_threads", + "powerfmt", "serde", "time-core", "time-macros", @@ -4258,10 +4377,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + "num-conv", "time-core", ] @@ -4292,9 +4412,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -4304,7 +4424,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2", "tokio-macros", "tracing", "windows-sys 0.48.0", @@ -4322,13 +4442,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = 
"5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -4350,7 +4470,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", ] [[package]] @@ -4382,9 +4502,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -4405,47 +4525,47 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.3" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b150d2f463da7b52f12110d3995dc86598bf90d535e929e5f5af15ab89155011" +checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.3", + "toml_edit 0.22.6", ] [[package]] name = "toml_datetime" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51cc078118ed25af325985ff674c00c8416b0f962be67da4946854ebfc99f334" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.15" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.2.3", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.20.3" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a2534c1aa199edef7108fb7d970facaa17f8f8cc5ce6bde75372cfa1051ed91" +checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.2", ] [[package]] @@ -4457,7 +4577,34 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.4", + "base64 0.21.7", + "bytes", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.21.7", "bytes", "h2", "http", @@ -4485,7 +4632,33 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn 2.0.38", + "syn 2.0.50", +] + +[[package]] +name = "tonic-build" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4ef6dd70a610078cb4e338a0f79d06bc759ff1b22d2120c2ff02ae264ba9c2" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn 2.0.50", +] + +[[package]] +name = "tonic-reflection" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7" +dependencies = [ + "prost", + "prost-types", + "tokio", + "tokio-stream", + "tonic 0.11.0", ] [[package]] @@ -4503,7 +4676,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "tower-layer", "tower-service", "tracing", @@ -4511,7 +4684,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.6" +version = "0.2.41-beta.11" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4523,19 +4696,18 @@ dependencies = [ "tinyvec", "tokio", "tokio-test", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "tower", "tower-fallback", "tower-test", "tracing", "tracing-futures", - "zebra-consensus", "zebra-test", ] [[package]] name = "tower-fallback" -version = "0.2.41-beta.6" +version = "0.2.41-beta.11" dependencies = [ "futures-core", "pin-project", @@ -4585,11 +4757,12 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", + "thiserror", "time", "tracing-subscriber", ] @@ -4602,7 +4775,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] [[package]] @@ -4659,20 +4832,31 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ - "lazy_static", "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", "tracing-core", ] [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -4683,7 +4867,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.2.0", ] [[package]] @@ -4711,9 +4895,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" @@ -4765,9 +4949,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -4777,18 +4961,18 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = 
"unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" @@ -4818,13 +5002,19 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "ureq" -version = "2.8.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3" +checksum = "f8cdd25c339e200129fe4de81451814e5228c9b771d57378817d6117cc2b3f97" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "log", "once_cell", "rustls", @@ -4835,9 +5025,9 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna", @@ -4853,9 +5043,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.4.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ "serde", ] @@ -4880,12 +5070,15 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.2.5" +version = "8.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e7dc29b3c54a2ea67ef4f953d5ec0c4085035c0ae2d325be1c0d2144bd9f16" +checksum = "e27d6bdd219887a9eadd19e1c34f32e47fa332301184935c6d9bca26f3cca525" dependencies = [ "anyhow", + "cargo_metadata", + "cfg-if 1.0.0", "git2", + "regex", "rustc_version 0.4.0", "rustversion", "time", @@ -4995,9 +5188,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5005,24 +5198,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = 
"c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5032,9 +5225,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5042,28 +5235,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" dependencies = [ "js-sys", "wasm-bindgen", @@ -5071,9 +5264,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "which" @@ -5119,21 +5312,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" +name = "windows-core" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.42.2", + "windows-targets 0.52.0", ] [[package]] @@ -5146,18 +5330,12 @@ dependencies = [ ] [[package]] -name = "windows-targets" -version = "0.42.2" +name = "windows-sys" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.52.0", ] [[package]] @@ -5176,10 +5354,19 @@ dependencies = [ ] [[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" +name = "windows-targets" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] [[package]] name = "windows_aarch64_gnullvm" @@ -5188,10 +5375,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" +name = "windows_aarch64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" @@ -5200,10 +5387,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] -name = "windows_i686_gnu" -version = "0.42.2" +name = "windows_aarch64_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" @@ -5212,10 +5399,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] -name = "windows_i686_msvc" -version = "0.42.2" +name = "windows_i686_gnu" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" @@ -5224,10 +5411,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" +name = "windows_i686_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" @@ -5236,10 +5423,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] -name = "windows_x86_64_gnullvm" -version 
= "0.42.2" +name = "windows_x86_64_gnu" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" @@ -5248,10 +5435,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" +name = "windows_x86_64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" @@ -5259,11 +5446,26 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" -version = "0.5.16" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037711d82167854aff2018dfd193aa0fef5370f456732f0d5a0c59b0f1b4b907" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + +[[package]] +name = "winnow" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a4191c47f15cc3ec71fcb4913cb83d58def65dd3787610213c649283b5ce178" dependencies = [ "memchr", ] @@ -5289,9 +5491,9 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", "rand_core 0.6.4", @@ -5316,9 +5518,9 @@ dependencies = [ [[package]] name = "zcash_address" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8944af5c206cf2e37020ad54618e1825501b98548d35a638b73e0ec5762df8d5" +checksum = "bce173f1d9ed4f806e310bc3a873301531e7a6dc209928584d6404e3f8228ef4" dependencies = [ "bech32", "bs58", @@ -5326,6 +5528,39 @@ dependencies = [ "zcash_encoding", ] +[[package]] +name = "zcash_client_backend" +version = "0.10.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc33f71747a93d509f7e1c047961e359a271bdf4869cc07f7f65ee1ba7df8c2" +dependencies = [ + "base64 0.21.7", + "bech32", + "bls12_381", + "bs58", + "crossbeam-channel", + "group", + "hex", + "incrementalmerkletree", + "memuse", + "nom", + "orchard", + "percent-encoding", + "prost", + "rayon", + "secrecy", + "shardtree", + "subtle", + "time", + "tonic-build 0.10.2", + "tracing", + "which", + "zcash_address", + "zcash_encoding", + "zcash_note_encryption", + "zcash_primitives", +] + [[package]] name = "zcash_encoding" version = "0.2.0" @@ -5439,7 +5674,7 @@ dependencies = [ "jubjub", "libc", "memuse", - "metrics", + "metrics 0.21.1", "orchard", "rand 0.8.5", "rand_core 0.6.4", @@ -5456,9 +5691,9 @@ 
dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -5478,7 +5713,7 @@ dependencies = [ "hex", "humantime", "incrementalmerkletree", - "itertools 0.11.0", + "itertools 0.12.1", "jubjub", "lazy_static", "num-integer", @@ -5497,7 +5732,7 @@ dependencies = [ "serde", "serde-big-array", "serde_json", - "serde_with 3.4.0", + "serde_with 3.6.1", "sha2", "spandoc", "static_assertions", @@ -5508,6 +5743,7 @@ dependencies = [ "uint", "x25519-dalek", "zcash_address", + "zcash_client_backend", "zcash_encoding", "zcash_history", "zcash_note_encryption", @@ -5517,7 +5753,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ "bellman", "blake2b_simd", @@ -5532,7 +5768,7 @@ dependencies = [ "howudoin", "jubjub", "lazy_static", - "metrics", + "metrics 0.22.1", "num-integer", "once_cell", "orchard", @@ -5561,11 +5797,33 @@ dependencies = [ "zebra-test", ] +[[package]] +name = "zebra-grpc" +version = "0.1.0-alpha.2" +dependencies = [ + "color-eyre", + "futures-util", + "insta", + "prost", + "serde", + "tokio", + "tokio-stream", + "tonic 0.11.0", + "tonic-build 0.11.0", + "tonic-reflection", + "tower", + "zcash_primitives", + "zebra-chain", + "zebra-node-services", + "zebra-state", + "zebra-test", +] + [[package]] name = "zebra-network" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "byteorder", "bytes", "chrono", @@ -5574,10 +5832,10 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.0.2", - "itertools 0.11.0", + "indexmap 2.2.3", + "itertools 0.12.1", "lazy_static", - "metrics", + "metrics 0.22.1", "num-integer", "ordered-map", "pin-project", @@ -5592,8 +5850,8 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "tokio-util 0.7.9", - "toml 0.8.3", + "tokio-util 0.7.10", + "toml 0.8.10", "tower", "tracing", "tracing-error", @@ -5604,30 +5862,30 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ "color-eyre", "jsonrpc-core", "reqwest", "serde", "serde_json", + "tokio", "zebra-chain", ] [[package]] name = "zebra-rpc" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ "chrono", "futures", "hex", "hyper", - "indexmap 2.0.2", + "indexmap 2.2.3", "insta", "jsonrpc-core", "jsonrpc-derive", "jsonrpc-http-server", - "num_cpus", "proptest", "rand 0.8.5", "serde", @@ -5646,9 +5904,41 @@ dependencies = [ "zebra-test", ] +[[package]] +name = "zebra-scan" +version = "0.1.0-alpha.4" +dependencies = [ + "bls12_381", + "chrono", + "color-eyre", + "ff", + "futures", + "group", + "indexmap 2.2.3", + "insta", + "itertools 0.12.1", + "jubjub", + "proptest", + "proptest-derive", + "rand 0.8.5", + "semver 1.0.22", + "serde", + "tokio", + "tower", + "tracing", + "zcash_client_backend", + "zcash_note_encryption", + "zcash_primitives", + "zebra-chain", + "zebra-grpc", + "zebra-node-services", + "zebra-state", + "zebra-test", +] + [[package]] name = "zebra-script" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ "displaydoc", "hex", @@ -5661,7 +5951,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ "bincode", "chrono", @@ -5674,12 +5964,12 @@ dependencies = [ "hex-literal", "howudoin", "humantime-serde", - 
"indexmap 2.0.2", + "indexmap 2.2.3", "insta", - "itertools 0.11.0", + "itertools 0.12.1", "jubjub", "lazy_static", - "metrics", + "metrics 0.22.1", "mset", "once_cell", "proptest", @@ -5689,7 +5979,7 @@ dependencies = [ "regex", "rlimit", "rocksdb", - "semver 1.0.20", + "semver 1.0.22", "serde", "serde_json", "spandoc", @@ -5705,18 +5995,18 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ "color-eyre", "futures", "hex", "humantime", - "indexmap 2.0.2", + "indexmap 2.2.3", "insta", - "itertools 0.11.0", + "itertools 0.12.1", "lazy_static", "once_cell", - "owo-colors", + "owo-colors 4.0.0", "proptest", "rand 0.8.5", "regex", @@ -5733,11 +6023,12 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" dependencies = [ "color-eyre", "hex", - "itertools 0.11.0", + "itertools 0.12.1", + "jsonrpc", "regex", "reqwest", "serde_json", @@ -5747,19 +6038,22 @@ dependencies = [ "tokio", "tracing-error", "tracing-subscriber", + "zcash_client_backend", + "zcash_primitives", "zebra-chain", "zebra-node-services", "zebra-rpc", + "zebra-scan", ] [[package]] name = "zebrad" -version = "1.3.0" +version = "1.6.0" dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.4.6", + "clap 4.5.1", "color-eyre", "console-subscriber", "dirs", @@ -5769,14 +6063,14 @@ dependencies = [ "howudoin", "humantime-serde", "hyper", - "indexmap 2.0.2", + "indexmap 2.2.3", "indicatif", "inferno", "insta", "jsonrpc-core", "lazy_static", "log", - "metrics", + "metrics 0.22.1", "metrics-exporter-prometheus", "num-integer", "once_cell", @@ -5787,18 +6081,19 @@ dependencies = [ "rand 0.8.5", "rayon", "regex", - "semver 1.0.20", + "semver 1.0.22", "sentry", "serde", "serde_json", "tempfile", "thiserror", + "thread-priority", "tinyvec", "tokio", "tokio-stream", - "toml 0.8.3", - "tonic", - "tonic-build", + "toml 0.8.10", + "tonic 0.11.0", + "tonic-build 0.11.0", "tower", "tracing", "tracing-appender", @@ -5811,19 +6106,41 @@ dependencies = [ "vergen", "zebra-chain", "zebra-consensus", + "zebra-grpc", "zebra-network", "zebra-node-services", "zebra-rpc", + "zebra-scan", "zebra-state", "zebra-test", "zebra-utils", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.50", +] + [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -5836,5 +6153,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.50", ] diff --git a/Cargo.toml b/Cargo.toml index 2146e9b487f..591c0d898e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,8 @@ members = [ "zebra-node-services", "zebra-test", "zebra-utils", + "zebra-scan", + "zebra-grpc", "tower-batch-control", "tower-fallback", ] @@ -17,6 +19,13 @@ 
members = [ # Use the edition 2021 dependency resolver in the workspace, to match the crates resolver = "2" +# `cargo release` settings + +[workspace.metadata.release] + +# We always do releases from the main branch +allow-branch = ["main"] + # Compilation settings [profile.dev] @@ -47,6 +56,12 @@ opt-level = 3 [profile.dev.package.bls12_381] opt-level = 3 +[profile.dev.package.byteorder] +opt-level = 3 + +[profile.dev.package.equihash] +opt-level = 3 + [profile.dev.package.zcash_proofs] opt-level = 3 diff --git a/LICENSE-APACHE b/LICENSE-APACHE index 0dd32f48ad3..456a4aa21ad 100644 --- a/LICENSE-APACHE +++ b/LICENSE-APACHE @@ -1,4 +1,4 @@ -Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019-2024 Zcash Foundation Apache License Version 2.0, January 2004 diff --git a/LICENSE-MIT b/LICENSE-MIT index 31cfa1b38ee..d85e83e16ab 100644 --- a/LICENSE-MIT +++ b/LICENSE-MIT @@ -1,4 +1,4 @@ -Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019-2024 Zcash Foundation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/README.md b/README.md index 4cbb72e4c63..a07298a0564 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ --- -[![CI Docker](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml) [![CI OSes](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-os.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-os.yml) [![Continuous Delivery](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml) [![codecov](https://codecov.io/gh/ZcashFoundation/zebra/branch/main/graph/badge.svg)](https://codecov.io/gh/ZcashFoundation/zebra) [![Build docs](https://github.com/ZcashFoundation/zebra/actions/workflows/docs.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/docs.yml) +[![CI Docker](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml) [![CI OSes](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-unit-tests-os.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-unit-tests-os.yml) [![Continuous Delivery](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml) [![codecov](https://codecov.io/gh/ZcashFoundation/zebra/branch/main/graph/badge.svg)](https://codecov.io/gh/ZcashFoundation/zebra) [![Build docs](https://github.com/ZcashFoundation/zebra/actions/workflows/docs-deploy-firebase.yml/badge.svg)](https://github.com/ZcashFoundation/zebra/actions/workflows/docs-deploy-firebase.yml) ![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg) ## Contents @@ -27,14 +27,13 @@ consensus-compatible implementation of a Zcash node. Zebra's network stack is interoperable with `zcashd`, and Zebra implements all the features required to reach Zcash network consensus, including the validation of all the consensus rules for the NU5 network upgrade. 
-[Here](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) are some +[Here](https://docs.rs/zebrad/latest/zebrad/index.html#zebra-advantages) are some benefits of Zebra. Zebra validates blocks and transactions, but needs extra software to generate them: -- To generate transactions, [run Zebra with - `lightwalletd`](https://zebra.zfnd.org/user/lightwalletd.html). +- To generate transactions, [run Zebra with `lightwalletd`](https://zebra.zfnd.org/user/lightwalletd.html). - To generate blocks, use a mining pool or miner with Zebra's mining JSON-RPCs. Currently Zebra can only send mining rewards to a single fixed address. To distribute rewards, use mining software that creates its own distribution transactions, @@ -62,8 +61,8 @@ For more information, read our [Docker documentation](https://zebra.zfnd.org/use ### Building Zebra Building Zebra requires [Rust](https://www.rust-lang.org/tools/install), -[libclang](https://clang.llvm.org/doxygen/group__CINDEX.html), -[pkg-config](http://pkgconf.org/), and a C++ compiler. +[libclang](https://clang.llvm.org/doxygen/group__CINDEX.html), and a C++ +compiler. Zebra is tested with the latest `stable` Rust version. Earlier versions are not supported or tested. Any Zebra release can start depending on new features in the @@ -73,9 +72,11 @@ Every few weeks, we release a [new Zebra version](https://github.com/ZcashFounda Below are quick summaries for installing the dependencies on your machine. -
+[//]: # "The empty line in the `summary` tag below is required for correct Markdown rendering." +
-

General instructions for installing dependencies

+#### General instructions for installing dependencies +
1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install).

@@ -85,16 +86,25 @@ Below are quick summaries for installing the dependencies on your machine.
     package manager. Typical names are `libclang`, `libclang-dev`, `llvm`, or
     `llvm-dev`.
   - **clang** or another C++ compiler: `g++` (all platforms) or `Xcode` (macOS).
-  - **pkg-config**
+  - **[`protoc`](https://grpc.io/docs/protoc-installation/)**
+
+> [!NOTE]
+> Zebra uses the `--experimental_allow_proto3_optional` flag with `protoc`
+> during compilation. This flag was introduced in [Protocol Buffers
+> v3.12.0](https://github.com/protocolbuffers/protobuf/releases/tag/v3.12.0)
+> released on May 16, 2020, so make sure you're not using a version of `protoc`
+> older than 3.12.
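+
+A quick way to confirm that your installed version meets this requirement
+(assuming `protoc` is already on your `PATH`):
+
+```sh
+# Prints something like `libprotoc 3.12.0` or newer.
+protoc --version
+```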
-
+[//]: # "The empty line in the `summary` tag below is required for correct Markdown rendering." +
-

Dependencies on Arch

+#### Dependencies on Arch +
```sh
-sudo pacman -S rust clang pkgconf
+sudo pacman -S rust clang protobuf
```

 Note that the package `clang` includes `libclang` as well as the C++ compiler.
@@ -118,6 +128,14 @@ sections in the book for more details.

 #### Optional Configs & Features

+##### Initializing Configuration File
+
+```console
+zebrad generate -o ~/.config/zebrad.toml
+```
+
+The above command places the generated `zebrad.toml` config file in the default preferences directory on Linux. For the default locations on other OSes, [see here](https://docs.rs/dirs/latest/dirs/fn.preference_dir.html).
+
 ##### Configuring Progress Bars

 Configure `tracing.progress_bar` in your `zebrad.toml` to
@@ -142,6 +160,7 @@ You can also build Zebra with additional [Cargo features](https://doc.rust-lang.
 - `prometheus` for [Prometheus metrics](https://zebra.zfnd.org/user/metrics.html)
 - `sentry` for [Sentry monitoring](https://zebra.zfnd.org/user/tracing.html#sentry-production-monitoring)
 - `elasticsearch` for [experimental Elasticsearch support](https://zebra.zfnd.org/user/elasticsearch.html)
+- `shielded-scan` for [experimental shielded scan support](https://zebra.zfnd.org/user/shielded-scan.html)

 You can combine multiple features by listing them as parameters of the `--features` flag:

@@ -149,8 +168,7 @@
 cargo install --features=" ..." ...
 ```

-Our full list of experimental and developer features is in [the API
-documentation](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-feature-flags).
+Our full list of experimental and developer features is in [the API documentation](https://docs.rs/zebrad/latest/zebrad/index.html#zebra-feature-flags).

 Some debugging and monitoring features are disabled in release builds to increase
@@ -159,30 +177,33 @@ performance.

 ## Known Issues

 There are a few bugs in Zebra that we're still working on fixing:

+- [The `getpeerinfo` RPC shows current and recent outbound connections](https://github.com/ZcashFoundation/zebra/issues/7893), rather than current inbound and outbound connections.
+
 - [Progress bar estimates can become extremely large](https://github.com/console-rs/indicatif/issues/556). We're waiting on a fix in the progress bar library.

 - Zebra currently gossips and connects to [private IP addresses](https://en.wikipedia.org/wiki/IP_address#Private_addresses), we want to [disable private IPs but provide a config (#3117)](https://github.com/ZcashFoundation/zebra/issues/3117) in an upcoming release

 - Block download and verification sometimes times out during Zebra's initial sync [#5709](https://github.com/ZcashFoundation/zebra/issues/5709). The full sync still finishes reasonably quickly.

-- Rust 1.70 [causes crashes during shutdown on macOS x86_64 (#6812)](https://github.com/ZcashFoundation/zebra/issues/6812). The state cache should stay valid despite the crash.
-
 - No Windows support [#3801](https://github.com/ZcashFoundation/zebra/issues/3801). We used to test with Windows Server 2019, but not any more; see the issue for details.

 - Experimental Tor support is disabled until [Zebra upgrades to the latest `arti-client`](https://github.com/ZcashFoundation/zebra/issues/5492). This happened due to a Rust dependency conflict, which could only be resolved by `arti` upgrading to a version of `x25519-dalek` with the dependency fix.

-## Future Work
+## Documentation

-We will continue to add new features as part of future network upgrades, and in response to community feedback.
+The Zcash Foundation maintains the following resources documenting Zebra: -## Documentation +- The Zebra Book: + - [General Introduction](https://zebra.zfnd.org/index.html), + - [User Documentation](https://zebra.zfnd.org/user.html), + - [Developer Documentation](https://zebra.zfnd.org/dev.html). + +- The [documentation of the public + APIs](https://docs.rs/zebrad/latest/zebrad/#zebra-crates) for the latest + releases of the individual Zebra crates. -The [Zebra website](https://zebra.zfnd.org/) contains user documentation, such -as how to run or configure Zebra, set up metrics integrations, etc., as well as -developer documentation, such as design documents. We also render [API -documentation](https://doc.zebra.zfnd.org) for the external API of our crates, -as well as [internal documentation](https://doc-internal.zebra.zfnd.org) for -private APIs. +- The [documentation of the internal APIs](https://doc-internal.zebra.zfnd.org) + for the `main` branch of the whole Zebra monorepo. ## User support diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 9f43a554cb1..884d2fc6704 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -16,7 +16,10 @@ - [Mining](user/mining.md) - [Testnet Mining with s-nomp](user/mining-testnet-s-nomp.md) - [Mining with Zebra in Docker](user/mining-docker.md) + - [Shielded Scanning](user/shielded-scan.md) + - [Shielded Scanning gRPC Server](user/shielded-scan-grpc-server.md) - [Kibana blockchain explorer](user/elasticsearch.md) + - [Forking the Zcash Testnet with Zebra](user/fork-zebra-testnet.md) - [Troubleshooting](user/troubleshooting.md) - [Developer Documentation](dev.md) - [Contribution Guide](CONTRIBUTING.md) diff --git a/book/src/api.md b/book/src/api.md index c24898b82a2..792a70c5b00 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -1,6 +1,10 @@ # API Reference -Zebra's API documentation is generated using Rustdoc: +The Zcash Foundation maintains the following API documentation for Zebra: -- [`doc.zebra.zfnd.org`](https://doc.zebra.zfnd.org/) renders documentation for the public API; -- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/) renders documentation for the internal API. \ No newline at end of file +- The [documentation of the public + APIs](https://docs.rs/zebrad/latest/zebrad/#zebra-crates) for the latest + releases of the individual Zebra crates. + +- The [documentation of the internal APIs](https://doc-internal.zebra.zfnd.org) + for the `main` branch of the whole Zebra monorepo. diff --git a/book/src/dev.md b/book/src/dev.md index 7e7341a1cda..6115e734de0 100644 --- a/book/src/dev.md +++ b/book/src/dev.md @@ -1,7 +1,11 @@ # Developer Documentation -This section contains the contribution guide and design documentation. It -does not contain API documentation, which is generated using Rustdoc: +This section contains the contribution guide and design documentation. It does +not contain: -- [`doc.zebra.zfnd.org`](https://doc.zebra.zfnd.org/) renders documentation for the public API; -- [`doc-internal.zebra.zfnd.org`](https://doc-internal.zebra.zfnd.org/) renders documentation for the internal API. +- The [documentation of the public + APIs](https://docs.rs/zebrad/latest/zebrad/#zebra-crates) for the latest + releases of the individual Zebra crates. + +- The [documentation of the internal APIs](https://doc-internal.zebra.zfnd.org) + for the `main` branch of the whole Zebra monorepo. 
diff --git a/book/src/dev/continuous-delivery.md b/book/src/dev/continuous-delivery.md index e3592a145b5..30c3ed7e86b 100644 --- a/book/src/dev/continuous-delivery.md +++ b/book/src/dev/continuous-delivery.md @@ -25,4 +25,4 @@ A single instance can also be deployed, on an on-demand basis, if required, when long-lived instance, with specific changes, is needed to be tested in the Mainnet with the same infrastructure used for CI & CD. -Further validations of the actual process can be done on our continuous delivery [workflow file](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-delivery.yml). +Further validations of the actual process can be done on our continuous delivery [workflow file](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/cd-deploy-nodes-gcp.yml). diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md index 4310f904013..d59ad00aeee 100644 --- a/book/src/dev/continuous-integration.md +++ b/book/src/dev/continuous-integration.md @@ -4,7 +4,7 @@ Zebra has extensive continuous integration tests for node syncing and `lightwalletd` integration. -On every PR change, Zebra runs [these Docker tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-docker.yml): +On every PR change, Zebra runs [these Docker tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-integration-tests-gcp.yml): - Zebra update syncs from a cached state Google Cloud tip image - lightwalletd full syncs from a cached state Google Cloud tip image - lightwalletd update syncs from a cached state Google Cloud tip image @@ -21,7 +21,7 @@ which are shared by all tests. Tests prefer the latest image generated from the But if a state from the same commit is not available, tests will use the latest image from any branch and commit, as long as the state version is the same. -Zebra also does [a smaller set of tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-os.yml) on tier 2 platforms using GitHub actions runners. +Zebra also does [a smaller set of tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-unit-tests-os.yml) on tier 2 platforms using GitHub actions runners. ## Automated Merges @@ -33,14 +33,18 @@ We try to use Mergify as much as we can, so all PRs get consistent checks. Some PRs don't use Mergify: - Mergify config updates - Admin merges, which happen when there are multiple failures on the `main` branch - (these are disabled by our branch protection rules, but admins can remove the "don't allow bypassing these rules" setting) -- Manual merges (these are usually disabled by our branch protection rules) +- Manual merges (these are allowed by our branch protection rules, but we almost always use Mergify) + +Merging with failing CI is usually disabled by our branch protection rules. +See the `Admin: Manually Merging PRs` section below for manual merge instructions. We use workflow conditions to skip some checks on PRs, Mergify, or the `main` branch. For example, some workflow changes skip Rust code checks. When a workflow can skip a check, we need to create [a patch workflow](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/troubleshooting-required-status-checks#handling-skipped-but-required-checks) with an empty job with the same name. 
This is a [known Actions issue](https://github.com/orgs/community/discussions/13690#discussioncomment-6653382). This lets the branch protection rules pass when the job is skipped. In Zebra, we name these workflows with the extension `.patch.yml`. +### Branch Protection Rules + Branch protection rules should be added for every failure that should stop a PR merging, break a release, or cause problems for Zebra users. We also add branch protection rules for developer or devops features that we need to keep working, like coverage. @@ -55,6 +59,60 @@ When a new job is added in a PR, use the `#devops` Slack channel to ask a GitHub Adding a new Zebra crate automatically adds a new job to build that crate by itself in [ci-build-crates.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-build-crates.yml), so new crate PRs also need to add a branch protection rule. +#### Admin: Changing Branch Protection Rules + +[Zebra repository admins](https://github.com/orgs/ZcashFoundation/teams/zebra-admins) and +[Zcash Foundation organisation owners](https://github.com/orgs/ZcashFoundation/people?query=role%3Aowner) +can add or delete branch protection rules in the Zebra repository. + +To change branch protection rules: + +Any developer: + +0. Run a PR containing the new rule, so its name is available to autocomplete. +1. If the job doesn't run on all PRs, add a patch job with the name of the job. + If the job calls a reusable workflow, the name is `Caller job / Reusable step`. + (The name of the job inside the reusable workflow is ignored.) + +Admin: + +2. Go to the [branch protection rule settings](https://github.com/ZcashFoundation/zebra/settings/branches) +3. Click on `Edit` for the `main` branch +4. Scroll down to the `Require status checks to pass before merging` section. + (This section must always be enabled. If it is disabled, all the rules get deleted.) + +To add jobs: + +5. Start typing the name of the job or step in the search box +6. Select the name of the job or step to add it + +To remove jobs: + +7. Go to `Status checks that are required.` +8. Find the job name, and click the cross on the right to remove it + +And finally: + +9. Click `Save changes`, using your security key if needed + +If you accidentally delete a lot of rules, and you can't remember what they were, ask a +ZF organisation owner to send you a copy of the rules from the [audit log](https://github.com/organizations/ZcashFoundation/settings/audit-log). + +Organisation owners can also monitor rule changes and other security settings using this log. + +#### Admin: Manually Merging PRs + +Admins can allow merges with failing CI, to fix CI when multiple issues are causing failures. + +Admin: +1. Follow steps 2 and 3 above to open the `main` branch protection rule settings +2. Scroll down to `Do not allow bypassing the above settings` +3. Uncheck it +4. Click `Save changes` +5. Do the manual merge, and put an explanation on the PR +6. Re-open the branch protection rule settings, and re-enable `Do not allow bypassing the above settings` + + ### Pull Requests from Forked Repositories GitHub doesn't allow PRs from forked repositories to have access to our repository secret keys, even after we approve their CI. @@ -64,7 +122,7 @@ Until we [fix this CI bug](https://github.com/ZcashFoundation/zebra/issues/4529) 1. Reviewing the code to make sure it won't give our secret keys to anyone 2. Pushing a copy of the branch to the Zebra repository 3. 
Opening a PR using that branch -4. Closing the original PR with a note that it will be merged (this is reauired by Mergify) +4. Closing the original PR with a note that it will be merged (closing duplicate PRs is required by Mergify) 5. Asking another Zebra developer to approve the new PR ## Manual Testing Using Google Cloud @@ -182,7 +240,7 @@ To fix duplicate dependencies, follow these steps until the duplicate dependenci If the Docker cached state disks are full, increase the disk sizes in: - [deploy-gcp-tests.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/deploy-gcp-tests.yml) -- [continous-delivery.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-delivery.yml) +- [cd-deploy-nodes-gcp.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/cd-deploy-nodes-gcp.yml) If the GitHub Actions disks are full, follow these steps until the errors are fixed: diff --git a/book/src/dev/overview.md b/book/src/dev/overview.md index 2142be8b0fc..afefbd33403 100644 --- a/book/src/dev/overview.md +++ b/book/src/dev/overview.md @@ -86,7 +86,7 @@ other functionality, without requiring a full node. At a high level, the fullnode functionality required by `zebrad` is factored into several components: -- [`zebra-chain`](https://doc.zebra.zfnd.org/zebra_chain/index.html), providing +- [`zebra-chain`](https://docs.rs/zebra_chain), providing definitions of core data structures for Zcash, such as blocks, transactions, addresses, etc., and related functionality. It also contains the implementation of the consensus-critical serialization formats used in Zcash. @@ -100,7 +100,7 @@ into several components: towards verifying transactions, but will be extended to support creating them in the future. -- [`zebra-network`](https://doc.zebra.zfnd.org/zebra_network/index.html), +- [`zebra-network`](https://docs.rs/zebra_network), providing an asynchronous, multithreaded implementation of the Zcash network protocol inherited from Bitcoin. In contrast to `zcashd`, each peer connection has a separate state machine, and the crate translates the @@ -113,21 +113,21 @@ into several components: isolated from all other node state. This can be used, for instance, to safely relay data over Tor, without revealing distinguishing information. -- [`zebra-script`](https://doc.zebra.zfnd.org/zebra_script/index.html) provides +- [`zebra-script`](https://docs.rs/zebra_script) provides script validation. Currently, this is implemented by linking to the C++ script verification code from `zcashd`, but in the future we may implement a pure-Rust script implementation. -- [`zebra-consensus`](https://doc.zebra.zfnd.org/zebra_consensus/index.html) +- [`zebra-consensus`](https://docs.rs/zebra_consensus) performs [*semantic validation*](https://zebra.zfnd.org/dev/rfcs/0002-parallel-verification.html#verification-stages) of blocks and transactions: all consensus rules that can be checked independently of the chain state, such as verification of signatures, proofs, and scripts. Internally, the library - uses [`tower-batch-control`](https://doc.zebra.zfnd.org/tower_batch_control/index.html) to + uses [`tower-batch-control`](https://docs.rs/tower_batch_control) to perform automatic, transparent batch processing of contemporaneous verification requests. -- [`zebra-state`](https://doc.zebra.zfnd.org/zebra_state/index.html) is +- [`zebra-state`](https://docs.rs/zebra_state) is responsible for storing, updating, and querying the chain state. 
The state service is responsible for [*contextual verification*](https://zebra.zfnd.org/dev/rfcs/0002-parallel-verification.html#verification-stages): all consensus rules @@ -135,7 +135,7 @@ into several components: such as updating the nullifier set or checking that transaction inputs remain unspent. -- [`zebrad`](https://doc.zebra.zfnd.org/zebrad/index.html) contains the full +- [`zebrad`](https://docs.rs/zebrad) contains the full node, which connects these components together and implements logic to handle inbound requests from peers and the chain sync process. diff --git a/book/src/dev/rfcs/0004-asynchronous-script-verification.md b/book/src/dev/rfcs/0004-asynchronous-script-verification.md index 23800002119..439424044f9 100644 --- a/book/src/dev/rfcs/0004-asynchronous-script-verification.md +++ b/book/src/dev/rfcs/0004-asynchronous-script-verification.md @@ -31,11 +31,11 @@ and in parallel on a thread pool. - unlock script: a script satisfying the conditions of the lock script, allowing a UTXO to be spent. Stored in the [`transparent::Input::PrevOut::lock_script`][lock_script] field. -[transout]: https://doc.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html -[outpoint]: https://doc.zebra.zfnd.org/zebra_chain/transparent/struct.OutPoint.html -[lock_script]: https://doc.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html#structfield.lock_script -[transin]: https://doc.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html -[unlock_script]: https://doc.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html#variant.PrevOut.field.unlock_script +[transout]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html +[outpoint]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.OutPoint.html +[lock_script]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html#structfield.lock_script +[transin]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html +[unlock_script]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html#variant.PrevOut.field.unlock_script # Guide-level explanation diff --git a/book/src/dev/rfcs/0005-state-updates.md b/book/src/dev/rfcs/0005-state-updates.md index 7767975fd11..41bae0a07f6 100644 --- a/book/src/dev/rfcs/0005-state-updates.md +++ b/book/src/dev/rfcs/0005-state-updates.md @@ -713,9 +713,9 @@ validation and the anchor calculations.) Hypothetically, if Sapling were activated from genesis, the specification requires a Sapling anchor, but `zcashd` would ignore that anchor. 
-[`JoinSplit`]: https://doc.zebra.zfnd.org/zebra_chain/sprout/struct.JoinSplit.html
-[`Spend`]: https://doc.zebra.zfnd.org/zebra_chain/sapling/spend/struct.Spend.html
-[`Action`]: https://doc.zebra.zfnd.org/zebra_chain/orchard/struct.Action.html
+[`JoinSplit`]: https://doc-internal.zebra.zfnd.org/zebra_chain/sprout/struct.JoinSplit.html
+[`Spend`]: https://doc-internal.zebra.zfnd.org/zebra_chain/sapling/spend/struct.Spend.html
+[`Action`]: https://doc-internal.zebra.zfnd.org/zebra_chain/orchard/struct.Action.html

 These updates can be performed in a batch or without necessarily iterating
 over all transactions, if the data is available by other means; they're
diff --git a/book/src/dev/state-db-upgrades.md b/book/src/dev/state-db-upgrades.md
index db5e4bce868..15f962e88b4 100644
--- a/book/src/dev/state-db-upgrades.md
+++ b/book/src/dev/state-db-upgrades.md
@@ -1,18 +1,178 @@
 # Zebra Cached State Database Implementation

+## Adding a Column Family
+
+Most Zebra column families are implemented using low-level methods that allow accesses using any
+type. But this is error-prone, because we can accidentally use different types to read and write
+them. (Or read using different types in different methods.)
+
+If we type the column family name out every time, a typo can lead to a panic, because the column
+family doesn't exist.
+
+Instead:
+- define the name and type of each column family at the top of the implementation module,
+- add a method on the database that returns that type, and
+- add the column family name to the list of column families in the database:
+
+For example:
+```rust
+/// The name of the sapling transaction IDs result column family.
+pub const SAPLING_TX_IDS: &str = "sapling_tx_ids";
+
+/// The column families supported by the running `zebra-scan` database code.
+pub const SCANNER_COLUMN_FAMILIES_IN_CODE: &[&str] = &[
+    sapling::SAPLING_TX_IDS,
+];
+
+/// The type for reading sapling transaction IDs results from the database.
+pub type SaplingTxIdsCf<'cf> =
+    TypedColumnFamily<'cf, SaplingScannedDatabaseIndex, Option<SaplingScannedResult>>;
+
+impl Storage {
+    /// Returns a typed handle to the `sapling_tx_ids` column family.
+    pub(crate) fn sapling_tx_ids_cf(&self) -> SaplingTxIdsCf {
+        SaplingTxIdsCf::new(&self.db, SAPLING_TX_IDS)
+            .expect("column family was created when database was created")
+    }
+}
+```
+
+Then, every read of the column family uses that method, which enforces the correct types:
+(These methods have the same name as the low-level methods, but are easier to call.)
+```rust
+impl Storage {
+    /// Returns the result for a specific database index (key, block height, transaction index).
+    pub fn sapling_result_for_index(
+        &self,
+        index: &SaplingScannedDatabaseIndex,
+    ) -> Option<SaplingScannedResult> {
+        self.sapling_tx_ids_cf().zs_get(index).flatten()
+    }
+
+    /// Returns the Sapling indexes and results in the supplied range.
+    fn sapling_results_in_range(
+        &self,
+        range: impl RangeBounds<SaplingScannedDatabaseIndex>,
+    ) -> BTreeMap<SaplingScannedDatabaseIndex, Option<SaplingScannedResult>> {
+        self.sapling_tx_ids_cf().zs_items_in_range_ordered(range)
+    }
+}
+```
+
+This simplifies the implementation compared with the raw `ReadDisk` methods.
+
+To write to the database, use the `new_batch_for_writing()` method on the column family type.
+This returns a batch that enforces the correct types. Use `write_batch()` to write it to the
+database:
+```rust
+impl Storage {
+    /// Insert a sapling scanning `key`, and mark all heights before `birthday_height` so they
+    /// won't be scanned.
+    pub(crate) fn insert_sapling_key(
+        &mut self,
+        storage: &Storage,
+        sapling_key: &SaplingScanningKey,
+        birthday_height: Option<Height>,
+    ) {
+        ...
+        self.sapling_tx_ids_cf()
+            .new_batch_for_writing()
+            .zs_insert(&index, &None)
+            .write_batch()
+            .expect("unexpected database write failure");
+    }
+}
+```
+
+To write to an existing batch in legacy code, use `with_batch_for_writing()` instead.
+This relies on the caller to write the batch to the database:
+```rust
+impl DiskWriteBatch {
+    /// Updates the history tree for the tip, if it is not empty.
+    ///
+    /// The batch must be written to the database by the caller.
+    pub fn update_history_tree(&mut self, db: &ZebraDb, tree: &HistoryTree) {
+        let history_tree_cf = db.history_tree_cf().with_batch_for_writing(self);
+
+        if let Some(tree) = tree.as_ref().as_ref() {
+            // The batch is modified by this method and written by the caller.
+            let _ = history_tree_cf.zs_insert(&(), tree);
+        }
+    }
+}
+```
+
+To write to a legacy batch, then write it to the database, you can use
+`take_batch_for_writing(batch).write_batch()`.
+
+During database upgrades, you might need to access the same column family using different types.
+[Define a type](https://github.com/ZcashFoundation/zebra/pull/8115/files#diff-ba689ca6516946a903da62153652d91dc1bb3d0100bcf08698cb3f38ead57734R36-R53)
+and [convenience method](https://github.com/ZcashFoundation/zebra/pull/8115/files#diff-ba689ca6516946a903da62153652d91dc1bb3d0100bcf08698cb3f38ead57734R69-R87)
+for each legacy type, and use them during the upgrade.
+
+Some full examples of legacy code conversions, and the typed column family implementation itself
+are in [PR #8112](https://github.com/ZcashFoundation/zebra/pull/8112/files) and
+[PR #8115](https://github.com/ZcashFoundation/zebra/pull/8115/files).
+
+## Current Implementation
+
+### Verification Modes
+[verification]: #verification
+
+Zebra's state has two verification modes:
+- block hash checkpoints, and
+- full verification.
+
+This means that verification uses two different codepaths, and they must produce the same results.
+
+By default, Zebra uses as many checkpoints as it can, because they are more secure against rollbacks
+(and some other kinds of chain attacks). Then it uses full verification for the last few thousand
+blocks.
+
+When Zebra gets more checkpoints in each new release, it checks the previously verified cached
+state against those checkpoints. This checks that the two codepaths produce the same results.
+
 ## Upgrading the State Database
+[upgrades]: #upgrades

 For most state upgrades, we want to modify the database format of the existing
 database. If we change the major database version, every user needs to
 re-download and re-verify all the blocks, which can take days.

+### Writing Blocks to the State
+[write-block]: #write-block
+
+Blocks can be written to the database via two different code paths, and both must produce the same results:
+
+- Upgrading a pre-existing database to the latest format
+- Writing newly-synced blocks in the latest format
+
+This code is high risk, because discovering bugs is tricky, and fixing bugs can require a full reset
+and re-write of an entire column family.
+
+Most Zebra instances will do an upgrade, because they already have a cached state, and upgrades are
+faster. But we run a full sync in CI every week, because new users use that codepath. (And it is
+their first experience of Zebra.)
+
+When Zebra starts up and shuts down (and periodically in CI tests), we run checks on the state
+format. This makes sure that the two codepaths produce the same state on disk.
+
+To reduce code and testing complexity:
+- when a previous Zebra version opens a newer state, the entire state is considered to have that lower version, and
+- when a newer Zebra version opens an older state, each required upgrade is run on the entire state.
+
 ### In-Place Upgrade Goals
+[upgrade-goals]: #upgrade-goals

+Here are the goals of in-place upgrades:
 - avoid a full download and rebuild of the state
-- the previous state format must be able to be loaded by the new state
+- Zebra must be able to upgrade the format from previous minor or patch versions of its disk format
+  (Major disk format versions are breaking changes. They create a new empty state and re-sync the whole chain.)
   - this is checked the first time CI runs on a PR with a new state version.
     After the first CI run, the cached state is marked as upgraded, so the upgrade doesn't run again.
     If CI fails on the first run, any cached states with that version should be deleted.
+- the upgrade and full sync formats must be identical
+  - this is partially checked by the state validity checks for each upgrade (see above)
 - previous zebra versions should be able to load the new format
   - this is checked by other PRs running using the upgraded cached state, but only if a Rust PR
     runs after the new PR's CI finishes, but before it merges
@@ -30,17 +190,26 @@ This means that:
 - it can't give incorrect results, because that can affect verification or wallets
 - it can return an error
 - it can only return an `Option` if the caller handles it correctly
-- multiple upgrades must produce a valid state format
+- full syncs and upgrades must write the same format
+  - the same write method should be called from both the full sync and upgrade code,
+    this helps prevent data inconsistencies
+- repeated upgrades must produce a valid state format
   - if Zebra is restarted, the format upgrade will run multiple times
   - if an older Zebra version opens the state, data can be written in an older format
-- the format must be valid before and after each database transaction or API call, because an upgrade can be cancelled at any time
+- the format must be valid before and after each database transaction or API call, because an
+  upgrade can be cancelled at any time
 - multi-column family changes should be made in database transactions
-  - if you are building new column family, disable state queries, then enable them once it's done
+  - if you are building a new column family:
+    - disable state queries, then enable them once it's done, or
+    - do the upgrade in an order that produces correct results
+      (for example, some data is valid from genesis forward, and some from the tip backward)
   - if each database API call produces a valid format, transactions aren't needed

-If there is an upgrade failure, it can panic and tell the user to delete their cached state and re-launch Zebra.
+If there is an upgrade failure, panic and tell the user to delete their cached state and re-launch
+Zebra.

-### Performance Constraints
+#### Performance Constraints
+[performance]: #performance

 Some column family access patterns can lead to very poor performance.

@@ -62,17 +231,50 @@ But we need to use iterators for some operations, so our alternatives are (in pr
 Currently only UTXOs require key deletion, and only `utxo_loc_by_transparent_addr_loc` requires
 deletion and iterators.
-### Implementation Steps
+### Required Tests
+[testing]: #testing
+
+State upgrades are a high-risk change. They permanently modify the state format on production Zebra
+instances. Format issues are tricky to diagnose, and require extensive testing and a new release to
+fix. Deleting and rebuilding an entire column family can also be costly, taking minutes or hours the
+first time a cached state is upgraded to a new Zebra release.
+
+Some format bugs can't be fixed, and require an entire rebuild of the state. For example, deleting
+or corrupting transactions or block headers.
+
+So testing format upgrades is extremely important. Every state format upgrade should test:
+- new format serializations
+- new calculations or data processing
+- the upgrade produces a valid format
+- a full sync produces a valid format
+
+Together, the tests should cover every code path. For example, the subtree upgrade needed
+mid-block, end-of-block, sapling, and orchard tests. Those tests mainly relied on the validity
+checks for coverage.
+
+Each test should be followed by a restart, a sync of 200+ blocks, and another restart. This
+simulates typical user behaviour.
+
+And ideally:
+- An upgrade from the earliest supported Zebra version
+  (the CI sync-past-checkpoint tests do this on every PR)

-- [ ] update the [database format](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/state-db-upgrades.md#current) in the Zebra docs
-- [ ] increment the state minor version
-- [ ] write the new format in the block write task
-- [ ] update older formats in the format upgrade task
-- [ ] test that the new format works when creating a new state, and updating an older state
+#### Manually Triggering a Format Upgrade
+[manual-upgrade]: #manual-upgrade

-See the [upgrade design docs](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/state-db-upgrades.md#design) for more details.
+Zebra stores the current state minor and patch versions in a `version` file in the database
+directory. This path varies based on the OS, major state version, network, and config.

-These steps can be copied into tickets.
+For example, the default mainnet state version on Linux is at:
+`~/.cache/zebra/state/v25/mainnet/version`
+
+To upgrade a cached Zebra state from `v25.0.0` to the latest disk format, delete the version file.
+To upgrade from a specific version `v25.x.y`, edit the file so it contains `x.y`.
+
+Editing the file and running Zebra will trigger a re-upgrade over an existing state.
+Re-upgrades can hide format bugs. For example, if the old code was correct, and the
+new code skips blocks, the validity checks won't find that bug.
+
+So it is better to test with a full sync, and an older cached state.

 ## Current State Database Format
[current]: #current

@@ -124,8 +326,13 @@ We use the following rocksdb column families:
 | `history_tree`         | `()` | `NonEmptyHistoryTree` | Update |
 | `tip_chain_value_pool` | `()` | `ValueBalance`        | Update |

-Zcash structures are encoded using `ZcashSerialize`/`ZcashDeserialize`.
-Other structures are encoded using `IntoDisk`/`FromDisk`.
+### Data Formats
+[rocksdb-data-format]: #rocksdb-data-format
+
+We use big-endian encoding for keys, to allow database index prefix searches.
+
+Most Zcash protocol structures are encoded using `ZcashSerialize`/`ZcashDeserialize`.
+Other structures are encoded using custom `IntoDisk`/`FromDisk` implementations.
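+
+As a simplified sketch of what such a custom encoding can look like, here is how a 24-bit,
+big-endian height (matching the `Height` format listed below) might be written and read.
+The names and methods here are illustrative only, not Zebra's exact trait definitions:
+
+```rust
+struct Height(u32);
+
+impl Height {
+    /// Serialize as 3 big-endian bytes, dropping the most significant byte,
+    /// which is always zero for in-range heights.
+    fn as_disk_bytes(&self) -> [u8; 3] {
+        let be = self.0.to_be_bytes();
+        assert_eq!(be[0], 0, "height must fit in 24 bits");
+        [be[1], be[2], be[3]]
+    }
+
+    /// Deserialize 3 big-endian bytes back into a height.
+    fn from_disk_bytes(bytes: [u8; 3]) -> Height {
+        Height(u32::from_be_bytes([0, bytes[0], bytes[1], bytes[2]]))
+    }
+}
+
+fn main() {
+    // Round-trip check: the encoding must be lossless for in-range heights.
+    let height = Height(1_842_420);
+    assert_eq!(Height::from_disk_bytes(height.as_disk_bytes()).0, height.0);
+}
+```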
 Block and Transaction Data:
 - `Height`: 24 bits, big-endian, unsigned (allows for ~30 years worth of blocks)
@@ -145,17 +352,21 @@ Block and Transaction Data:
 - `NoteCommitmentSubtreeIndex`: 16 bits, big-endian, unsigned
 - `NoteCommitmentSubtreeData<{sapling, orchard}::tree::Node>`: `Height \|\| {sapling, orchard}::tree::Node`

-We use big-endian encoding for keys, to allow database index prefix searches.

 Amounts:
 - `Amount`: 64 bits, little-endian, signed
 - `ValueBalance`: `[Amount; 4]`

-Derived Formats:
+Derived Formats (legacy):
 - `*::NoteCommitmentTree`: `bincode` using `serde`
   - stored note commitment trees always have cached roots
-- `NonEmptyHistoryTree`: `bincode` using `serde`, using `zcash_history`'s `serde` implementation
+- `NonEmptyHistoryTree`: `bincode` using `serde`, using our copy of an old `zcash_history` `serde`
+  implementation
+
+`bincode` is a risky format to use, because it depends on the exact order and type of struct fields.
+Do not use it for new column families.
+
+#### Address Format
+[rocksdb-address-format]: #rocksdb-address-format

 The following figure helps with visualizing the address index, which is the most complicated part.
 Numbers in brackets are array sizes; bold arrows are compositions (i.e. `TransactionLocation` is the
@@ -200,6 +411,7 @@ Each column family handles updates differently, based on its specific consensus
   - Each key-value entry is created once.
   - Keys can be deleted, but values are never updated.
   - Code called by ReadStateService must ignore deleted keys, or use a read lock.
+  - We avoid deleting keys, and avoid using iterators on `Delete` column families, for performance.
   - TODO: should we prevent re-inserts of keys that have been deleted?
 - Update:
   - Each key-value entry is created once.
@@ -353,8 +565,6 @@ So they should not be used for consensus-critical checks.
   the Merkle tree nodes as required to insert new items.
   For each block committed, the old tree is deleted and a new one is inserted
   by its new height.
-  **TODO:** store the sprout note commitment tree by `()`,
-  to avoid ReadStateService concurrent write issues.

 - The `{sapling, orchard}_note_commitment_tree` stores the note commitment tree
   state for every height, for the specific pool. Each tree is stored
@@ -368,7 +578,6 @@ state. There is always a single entry for it. The tree is stored
   as the set of "peaks" of the "Merkle mountain range" tree structure, which
   is what is required to insert new items.
-  **TODO:** store the history tree by `()`, to avoid ReadStateService concurrent write issues.

 - Each `*_anchors` stores the anchor (the root of a Merkle tree) of the note
   commitment tree of a certain block. We only use the keys since we just need the set of anchors,
diff --git a/book/src/user/docker.md b/book/src/user/docker.md
index 439cefd27fe..4dc7a60d754 100644
--- a/book/src/user/docker.md
+++ b/book/src/user/docker.md
@@ -4,20 +4,40 @@ The easiest way to run Zebra is using [Docker](https://docs.docker.com/get-docke

 We've embraced Docker in Zebra for most of the solution lifecycle, from development environments to CI (in our pipelines), and deployment to end users.

+> [!TIP]
+> We recommend using the `docker compose` sub-command over the plain `docker` CLI, especially for more advanced use-cases like running CI locally, as it provides a more convenient and powerful way to manage multi-container Docker applications.
See [CI/CD Local Testing](#cicd-local-testing) for more information, and other compose files available in the [docker](https://github.com/ZcashFoundation/zebra/tree/main/docker) folder. + ## Quick usage You can deploy Zebra for daily use with the images available in [Docker Hub](https://hub.docker.com/r/zfnd/zebra) or build it locally for testing. ### Ready to use image +Using `docker compose`: + ```shell -docker run --detach zfnd/zebra:latest +docker compose -f docker/docker-compose.yml up +``` + +With plain `docker` CLI: + +```shell +docker volume create zebrad-cache + +docker run -d --platform linux/amd64 \ + --restart unless-stopped \ + --env-file .env \ + --mount type=volume,source=zebrad-cache,target=/var/cache/zebrad-cache \ + -p 8233:8233 \ + --memory 16G \ + --cpus 4 \ + zfnd/zebra ``` ### Build it locally ```shell -git clone --depth 1 --branch v1.3.0 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.6.0 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` @@ -32,7 +52,7 @@ You're able to specify various parameters when building or launching the Docker For example, if we'd like to enable metrics on the image, we'd build it using the following `build-arg`: -> [!WARNING] +> [!IMPORTANT] > To fully use and display the metrics, you'll need to run a Prometheus and Grafana server, and configure it to scrape and visualize the metrics endpoint. This is explained in more detailed in the [Metrics](https://zebra.zfnd.org/user/metrics.html#zebra-metrics) section of the User Guide. ```shell @@ -63,11 +83,53 @@ cache_dir = "/var/cache/zebrad-cache" endpoint_addr = "127.0.0.1:9999" ``` -### Build time arguments +### Running Zebra with Lightwalletd + +To run Zebra with Lightwalletd, we recommend using the provided `docker compose` files for Zebra and Lightwalletd, which will start both services and connect them together, while exposing ports, mounting volumes, and setting environment variables. + +```shell +docker compose -f docker/docker-compose.yml -f docker/docker-compose.lwd.yml up +``` + +### CI/CD Local Testing + +To run CI tests locally, which mimics the testing done in our CI pipelines on GitHub Actions, use the `docker-compose.test.yml` file. This setup allows for a consistent testing environment both locally and in CI. + +#### Running Tests Locally + +1. **Setting Environment Variables**: + - Modify the `test.env` file to set the desired test configurations. + - For running all tests, set `RUN_ALL_TESTS=1` in `test.env`. + +2. **Starting the Test Environment**: + - Use Docker Compose to start the testing environment: + + ```shell + docker-compose -f docker/docker-compose.test.yml up + ``` + + - This will start the Docker container and run the tests based on `test.env` settings. + +3. **Viewing Test Output**: + - The test results and logs will be displayed in the terminal. + +4. **Stopping the Environment**: + - Once testing is complete, stop the environment using: + + ```shell + docker-compose -f docker/docker-compose.test.yml down + ``` + +This approach ensures you can run the same tests locally that are run in CI, providing a robust way to validate changes before pushing to the repository. + +### Build and Run Time Configuration + +#### Build Time Arguments #### Configuration - `FEATURES`: Specifies the features to build `zebrad` with. 
  Example: `"default-release-binaries getblocktemplate-rpcs"`
+- `TEST_FEATURES`: Specifies the features for tests.
+  Example: `"lightwalletd-grpc-tests zebra-checkpoints"`
 
 #### Logging
 
@@ -86,9 +148,7 @@ endpoint_addr = "127.0.0.1:9999"
 
 - `SHORT_SHA`: Represents the short SHA of the commit.
   Example: `"a1b2c3d"`
 
-### Run time variables
-
-#### Network
+#### Run Time Variables
 
 - `NETWORK`: Specifies the network type.
   Example: `"Mainnet"`
@@ -113,8 +173,10 @@ endpoint_addr = "127.0.0.1:9999"
 - `TRACING_ENDPOINT_ADDR`: Address for tracing endpoint.
   Example: `"0.0.0.0"`
 - `TRACING_ENDPOINT_PORT`: Port for tracing endpoint.
   Example: `3000`
 
+Specific tests are defined in the `docker/test.env` file, and can be enabled by setting the corresponding environment variable to `1`.
+
 ## Registries
 
-The images built by the Zebra team are all publicly hosted. Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-docker.yml) (`zebrad-test`, `lighwalletd`) might be deleted on a scheduled basis.
+The images built by the Zebra team are all publicly hosted. Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-integration-tests-gcp.yml) (`zebrad-test`, `lightwalletd`) might be deleted on a scheduled basis.
 
 We use [Docker Hub](https://hub.docker.com/r/zfnd/zebra) for end-user images and [Google Artifact Registry](https://console.cloud.google.com/artifacts/docker/zfnd-dev-zebra/us/zebra) to build external tools and test images.
diff --git a/book/src/user/fork-zebra-testnet.md b/book/src/user/fork-zebra-testnet.md
new file mode 100644
index 00000000000..a7bf7b410c4
--- /dev/null
+++ b/book/src/user/fork-zebra-testnet.md
@@ -0,0 +1,177 @@
+# Forking the Zcash Testnet with Zebra
+
+The Zcash blockchain community consistently explores upgrades to the Zcash protocol, introducing new features to the consensus layer. This tutorial guides teams or individuals through forking the Zcash Testnet locally using Zebra, enabling testing of custom functionality in a private testnet environment.
+
+As of writing, the current network upgrade on the Zcash Testnet is `Nu5`. While a future upgrade (`Nu6`) activation height will be known later, for this tutorial, we aim to activate after `Nu5`, allowing us to observe our code crossing the network upgrade and continuing in isolation.
+
+To achieve this, we'll use [Zebra](https://github.com/ZcashFoundation/zebra) as the node, [s-nomp](https://github.com/ZcashFoundation/s-nomp) as the mining pool, and [nheqminer](https://github.com/ZcashFoundation/nheqminer) as the Equihash miner.
+
+**Note:** This tutorial aims to remain generally valid after `Nu6`, with adjustments to the network upgrade name and block heights.
+
+## Requirements
+
+- A modified Zebra version capable of syncing up to our chosen activation height, including the changes from the [code changes step](#code-changes).
+- Mining tools:
+  - s-nomp pool
+  - nheqminer
+
+You may have two Zebra versions: one for syncing up to the activation height, and another (preferably built on top of the first one) with the network upgrade and additional functionality.
+
+**Note:** For the mining setup, please see [How to mine with Zebra on testnet](https://zebra.zfnd.org/user/mining-testnet-s-nomp.html).
+
+## Sync the Testnet to a Block after `Nu5` Activation
+
+Select a height for the new network upgrade after `Nu5`.
+In the Zcash public testnet, the `Nu5` activation height is `1_842_420`, and at the time of writing, the testnet was at around block `2_598_958`. To avoid dealing with checkpoints, choose a block that is not only after `Nu5` but also above the current testnet tip. In this tutorial, we chose block `2_599_958`, which is 1000 blocks ahead of the current testnet tip.
+
+Clone Zebra, create a config file, and use `state.debug_stop_at_height` to halt the Zebra sync after reaching our chosen network upgrade block height (`2_599_958`).
+
+The relevant parts of the config file are:
+
+```
+[network]
+listen_addr = "0.0.0.0:18233"
+network = "Testnet"
+
+[state]
+debug_stop_at_height = 2599958
+cache_dir = "/home/user/.cache/zebra"
+```
+
+Generate a Zebra config file:
+
+```
+zebrad generate -o myconf.toml
+```
+
+Start Zebra with the modified config:
+
+```
+zebrad -c myconf.toml start
+```
+
+Wait for the sync to complete (this may take up to 24 hours, depending on testnet conditions), resulting in a state up to the desired block in `~/.cache/zebra`.
+
+## Code changes
+
+We need to add the network upgrade variant to the `zcash_primitives` crate and Zebra.
+
+### librustzcash / zcash_primitives
+
+Add the new network upgrade variant and a consensus branch ID, along with the other changes needed for the library to compile. Here are some examples:
+
+- Sample internal commit: [Unclean test code](https://github.com/zcash/librustzcash/commit/76d81db22fb4c52302f81c9b3e1d98fb6b71188c)
+- Public PR adding Nu6 behind a feature: [librustzcash PR #1048](https://github.com/zcash/librustzcash/pull/1048)
+
+After the changes, check that the library can be built with `cargo build --release`.
+
+### Zebra
+
+Here we are making changes to create an isolated network version of Zebra. In addition to your own changes, this Zebra version needs to have the following:
+
+- Add a `Nu6` variant to the `NetworkUpgrade` enum located in `zebra-chain/src/parameters/network_upgrade.rs`.
+- Add a consensus branch ID: a random value that has not been used before. We used `00000006` in our tests when writing this tutorial.
+- Point to the modified `zcash_primitives` in `zebra-chain/Cargo.toml`. In our case, we had to replace the dependency line with something like:
+
+  ```
+  zcash_primitives = { git = "https://github.com/oxarbitrage/librustzcash", branch = "nu6-test", features = ["transparent-inputs"] }
+  ```
+- Make any fixes needed to compile.
+- Ignore how far we are from the tip in the get block template code: `zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs`
+
+Unclean test commit for Zebra: [Zebra commit](https://github.com/ZcashFoundation/zebra/commit/d05af154c897d4820999fcb968b7b62d10b26aa8)
+
+Make sure you can build the `zebrad` binary after the changes with `cargo build --release`.
+
+## Configuration for isolated network
+
+Now that you have a synced state and a modified Zebra version, it's time to run your isolated network.
+Relevant parts of the configuration file:
+
+```
+[mempool]
+debug_enable_at_height = 0
+
+[mining]
+debug_like_zcashd = true
+miner_address = 't27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v'
+
+[network]
+cache_dir = false
+initial_testnet_peers = [
+    "dnsseed.testnet.z.cash:18233",
+    "testnet.seeder.zfnd.org:18233",
+    "testnet.is.yolo.money:18233",
+]
+listen_addr = "0.0.0.0:18233"
+network = "Testnet"
+
+[rpc]
+listen_addr = "0.0.0.0:18232"
+
+[state]
+cache_dir = "/home/oxarbitrage/.cache/zebra"
+```
+
+- `debug_enable_at_height = 0` enables the mempool independently of the tip height.
+- The `[mining]` section and the `rpc.listen_addr` RPC endpoint are necessary for mining blocks.
+- `initial_testnet_peers` is needed because Zebra starts behind the fork block, approximately 100 blocks behind, so it needs to receive those blocks again. This is necessary until the new fork passes more than 100 blocks after the fork height. At that point, this network can be isolated, and `initial_testnet_peers` can be set to `[]`.
+- Ensure your `state.cache_dir` is the same as when you saved the state in the sync step above.
+
+Start the chain with:
+
+```
+zebrad -c myconf.toml start
+```
+
+Start s-nomp:
+
+```
+npm start
+```
+
+Start the miner:
+
+```
+nheqminer -l 127.0.0.1:1234 -u tmRGc4CD1UyUdbSJmTUzcB6oDqk4qUaHnnh.worker1 -t 1
+```
+
+## Confirm Forked Chain
+
+After Zebra retrieves blocks up to your activation height from the network, the network upgrade will change, and no more valid blocks can be received from the public testnet.
+
+After a while, you should see s-nomp submitting blocks past the fork height from time to time:
+
+```
+...
+2023-11-24 16:32:05 [Pool] [zcash_testnet] (Thread 1) Block notification via RPC after block submission
+2023-11-24 16:32:24 [Pool] [zcash_testnet] (Thread 1) Submitted Block using submitblock successfully to daemon instance(s)
+2023-11-24 16:32:24 [Pool] [zcash_testnet] (Thread 1) Block found: 0049f2daaaf9e90cd8b17041de0a47350e6811c2d0c9b0aed9420e91351abe43 by tmRGc4CD1UyUdbSJmTUzcB6oDqk4qUaHnnh.worker1
+2023-11-24 16:32:24 [Pool] [zcash_testnet] (Thread 1) Block notification
+...
+```
+
+You'll also see this in Zebra:
+
+```
+...
+2023-11-24T19:32:05.574715Z  INFO zebra_rpc::methods::get_block_template_rpcs: submit block accepted block_hash=block::Hash("0084e1df2369a1fd5f75ab2b8b24472c49812669c812c7d528b0f8f88a798578") block_height="2599968"
+2023-11-24T19:32:24.661758Z  INFO zebra_rpc::methods::get_block_template_rpcs: submit block accepted block_hash=block::Hash("0049f2daaaf9e90cd8b17041de0a47350e6811c2d0c9b0aed9420e91351abe43") block_height="2599969"
+...
+```
+
+Ignore messages in Zebra related to how far you are from the tip, or network/system clock issues, etc.
+
+Check that you are in the right branch with the following curl command:
+
+```
+curl --silent --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }' -H 'Content-type: application/json' http://127.0.0.1:18232/ | jq
+```
+
+In the result, verify that the tip of the chain is past your `Nu6` activation height, and that you are in branch `00000006` as expected.
+
+## Final words
+
+Next steps depend on your use case. You might want to submit transactions with new fields, accept those transactions as part of new blocks in the forked chain, or observe changes at activation without sending transactions. Further actions are not covered in this tutorial.
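+As a recap of the [code changes](#code-changes) above, the sketch below shows the general shape of the edit, with hedged, simplified types. Zebra's real `NetworkUpgrade` enum and consensus branch ID table live in `zebra-chain/src/parameters/network_upgrade.rs` and use a `ConsensusBranchId` newtype rather than a bare `u32`, so treat this as an illustration, not a patch:
+
+```rust
+/// Simplified sketch of Zebra's network upgrade list.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum NetworkUpgrade {
+    // ... earlier upgrades elided ...
+    Nu5,
+    /// The custom upgrade activated by our forked testnet.
+    Nu6,
+}
+
+/// Consensus branch IDs, as bare `u32`s for illustration.
+/// `0x0000_0006` is the arbitrary, previously unused value chosen in this tutorial.
+const CONSENSUS_BRANCH_IDS: &[(NetworkUpgrade, u32)] = &[
+    // ... earlier entries elided ...
+    (NetworkUpgrade::Nu5, 0xc2d6_d0b4),
+    (NetworkUpgrade::Nu6, 0x0000_0006),
+];
+
+/// Looks up the consensus branch ID for a network upgrade.
+fn branch_id(upgrade: NetworkUpgrade) -> Option<u32> {
+    CONSENSUS_BRANCH_IDS
+        .iter()
+        .find(|(u, _)| *u == upgrade)
+        .map(|(_, id)| *id)
+}
+```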
diff --git a/book/src/user/install.md b/book/src/user/install.md index 2a03398d6e4..a3d22362862 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -6,7 +6,6 @@ Follow the [Docker or compilation instructions](https://zebra.zfnd.org/index.htm To compile Zebra from source, you will need to [install some dependencies.](https://zebra.zfnd.org/index.html#building-zebra). - ## Alternative Compilation Methods ### Compiling Manually from git @@ -20,7 +19,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v1.3.0 +git checkout v1.6.0 ``` 3. Build and Run `zebrad` @@ -33,7 +32,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.3.0 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.6.0 zebrad ``` ### Compiling on ARM @@ -58,7 +57,12 @@ If you're having trouble with: - use `cargo install` without `--locked` to build with the latest versions of each dependency -#### Optional Tor feature +## Experimental Shielded Scanning feature + +- install the `rocksdb-tools` or `rocksdb` packages to get the `ldb` binary, which allows expert users to + [query the scanner database](https://zebra.zfnd.org/user/shielded-scan.html). This binary is sometimes called `rocksdb_ldb`. + +## Optional Tor feature - **sqlite linker errors:** libsqlite3 is an optional dependency of the `zebra-network/tor` feature. If you don't have it installed, you might see errors like `note: /usr/bin/ld: cannot find -lsqlite3`. diff --git a/book/src/user/lightwalletd.md b/book/src/user/lightwalletd.md index 5b7645e51fc..4688c89e519 100644 --- a/book/src/user/lightwalletd.md +++ b/book/src/user/lightwalletd.md @@ -6,17 +6,21 @@ recommend using use it in testing. Other `lightwalletd` forks have limited support, see the [Sync lightwalletd](#sync-lightwalletd) section for more info. +> [!NOTE] +> You can also use `docker` to run lightwalletd with zebra. Please see our [docker documentation](./docker.md#running-zebra-with-lightwalletd) for more information. + Contents: -- [Configure zebra for lightwalletd](#configure-zebra-for-lightwalletd) - - [JSON-RPC](#json-rpc) -- [Sync Zebra](#sync-zebra) -- [Download and build lightwalletd](#download-and-build-lightwalletd) -- [Sync lightwalletd](#sync-lightwalletd) -- [Run tests](#run-tests) -- [Connect wallet to lightwalletd](#connect-wallet-to-lightwalletd) - - [Download and build the cli-wallet](#download-and-build-the-cli-wallet) - - [Run the wallet](#run-the-wallet) +- [Running lightwalletd with zebra](#running-lightwalletd-with-zebra) + - [Configure zebra for lightwalletd](#configure-zebra-for-lightwalletd) + - [JSON-RPC](#json-rpc) + - [Sync Zebra](#sync-zebra) + - [Download and build lightwalletd](#download-and-build-lightwalletd) + - [Sync lightwalletd](#sync-lightwalletd) + - [Run tests](#run-tests) + - [Connect a wallet to lightwalletd](#connect-a-wallet-to-lightwalletd) + - [Download and build the cli-wallet](#download-and-build-the-cli-wallet) + - [Run the wallet](#run-the-wallet) ## Configure zebra for lightwalletd @@ -55,7 +59,7 @@ parallel_cpu_threads = 0 ``` **WARNING:** This config allows multiple Zebra instances to share the same RPC port. -See the [RPC config documentation](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html) for details. 
+See the [RPC config documentation](https://docs.rs/zebra_rpc/latest/zebra_rpc/config/struct.Config.html) for details. ## Sync Zebra @@ -71,7 +75,7 @@ Zebra will display information about sync process: ```console ... -zebrad::commands::start: estimated progress to chain tip sync_percent=10.783 % +zebrad::commands::start: estimated progress to chain tip sync_percent=10.783 % ... ``` @@ -79,13 +83,13 @@ Until eventually it will get there: ```console ... -zebrad::commands::start: finished initial sync to chain tip, using gossiped blocks sync_percent=100.000 % +zebrad::commands::start: finished initial sync to chain tip, using gossiped blocks sync_percent=100.000 % ... ``` You can interrupt the process at any time with `ctrl-c` and Zebra will resume the next time at around the block you were downloading when stopping the process. -When deploying for production infrastructure, the above command can/should be implemented as a server service or similar configuration. +When deploying for production infrastructure, the above command can be run as a service or daemon. For implementing zebra as a service please see [here](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/systemd/zebrad.service). @@ -144,7 +148,7 @@ Wait until lightwalletd is in sync before connecting any wallet into it. You wil ## Run tests [#run-tests]: (#run-tests) -The Zebra team created tests for the interaction of `zebrad` and `lightwalletd`. +The Zebra team created tests for the interaction of `zebrad` and `lightwalletd`. To run all the Zebra `lightwalletd` tests: 1. install `lightwalletd` @@ -156,7 +160,7 @@ Please refer to [acceptance](https://github.com/ZcashFoundation/zebra/blob/main/ ## Connect a wallet to lightwalletd [#connect-wallet-to-lightwalletd]: (#connect-wallet-to-lightwalletd) -The final goal is to connect wallets to the lightwalletd service backed by Zebra. +The final goal is to connect wallets to the lightwalletd service backed by Zebra. For demo purposes we used [zecwallet-cli](https://github.com/adityapk00/zecwallet-light-cli) with the [adityapk00/lightwalletd](https://github.com/adityapk00/lightwalletd) fork. We didn't test [zecwallet-cli](https://github.com/adityapk00/zecwallet-light-cli) with [zcash/lightwalletd](https://github.com/zcash/lightwalletd) yet. @@ -184,5 +188,5 @@ Lightclient connecting to http://127.0.0.1:9067/ "total_blocks_synced": 49476 } Ready! -(main) Block:1683911 (type 'help') >> +(main) Block:1683911 (type 'help') >> ``` diff --git a/book/src/user/metrics.md b/book/src/user/metrics.md index 030f67c1df0..474ec619c2c 100644 --- a/book/src/user/metrics.md +++ b/book/src/user/metrics.md @@ -56,4 +56,4 @@ front end that you can visualize: ![image info](grafana.png) -[metrics_section]: https://doc.zebra.zfnd.org/zebrad/config/struct.MetricsSection.html +[metrics_section]: https://docs.rs/zebrad/latest/zebrad/components/metrics/struct.Config.html diff --git a/book/src/user/mining-testnet-s-nomp.md b/book/src/user/mining-testnet-s-nomp.md index 70fbc7a7647..c33033ce718 100644 --- a/book/src/user/mining-testnet-s-nomp.md +++ b/book/src/user/mining-testnet-s-nomp.md @@ -20,7 +20,6 @@ These fixes disable mining pool operator payments and miner payments: they just ```console [consensus] checkpoint_sync = true - debug_skip_parameter_preload = false [mempool] eviction_memory_time = '1h' @@ -73,11 +72,11 @@ These fixes disable mining pool operator payments and miner payments: they just
-2. [Build](https://github.com/ZcashFoundation/zebra#build-instructions) and [Run Zebra](https://zebra.zfnd.org/user/run.html) with the `getblocktemplate-rpcs` feature: +2. [Run Zebra](https://zebra.zfnd.org/user/run.html) with the config you created: ```sh - cargo run --release --features "getblocktemplate-rpcs" --bin zebrad -- -c zebrad.toml + zebrad -c zebrad.toml ``` -3. Wait a few hours for Zebra to sync to the testnet tip (on mainnet this takes 2-3 days) +3. Wait for Zebra to sync to the testnet tip. This takes 8-12 hours on testnet (or 2-3 days on mainnet) as of October 2023. ## Install `s-nomp` @@ -111,18 +110,18 @@ These fixes disable mining pool operator payments and miner payments: they just 1. `git clone https://github.com/ZcashFoundation/s-nomp` 2. `cd s-nomp` 3. Use the Zebra fixes: `git checkout zebra-mining` -4. Use node 8.11.0: +4. Use node 10: ```sh - nodenv install 8.11.0 - nodenv local 8.11.0 + nodenv install 10 + nodenv local 10 ``` or ```sh - nvm install 8.11.0 - nvm use 8.11.0 + nvm install 10 + nvm use 10 ``` 5. Update dependencies and install: @@ -137,43 +136,49 @@ These fixes disable mining pool operator payments and miner payments: they just
Arch-specific instructions -#### Install dependencies +#### Install `s-nomp` -1. Install [`redis`](https://redis.io/docs/getting-started/) and run it on the default port: +1. Install Redis, and development libraries required by S-nomp ```sh - sudo pacman -S redis - sudo systemctl start redis + sudo pacman -S redis boost libsodium ``` - -2. Install and activate [`nvm`](https://github.com/nvm-sh/nvm#installing-and-updating): + +2. Install `nvm`, Python 3.10 and `virtualenv` ```sh - sudo pacman -S nvm - unset npm_config_prefix - source /usr/share/nvm/init-nvm.sh + paru -S python310 nvm + sudo pacman -S python-virtualenv ``` -3. Install `boost` and `libsodium` development libraries: - +3. Start Redis ```sh - sudo pacman -S boost libsodium + sudo systemctl start redis ``` -#### Install `s-nomp` +4. Clone the repository -1. `git clone https://github.com/ZcashFoundation/s-nomp && cd s-nomp` + ```sh + git clone https://github.com/ZcashFoundation/s-nomp && cd s-nomp + ``` -2. Use the Zebra configs: `git checkout zebra-mining` +5. Use Node 10: + + ```sh + unset npm_config_prefix + source /usr/share/nvm/init-nvm.sh + nvm install 10 + nvm use 10 + ``` -3. Use node 8.11.0: +6. Use Python 3.10 ```sh - nvm install 8.11.0 - nvm use 8.11.0 + virtualenv -p 3.10 s-nomp + source s-nomp/bin/activate ``` -4. Update dependencies and install: +7. Update dependencies and install: ```sh npm update diff --git a/book/src/user/requirements.md b/book/src/user/requirements.md index af6b669e69e..df95aa139ba 100644 --- a/book/src/user/requirements.md +++ b/book/src/user/requirements.md @@ -31,7 +31,7 @@ Zebra uses the following inbound and outbound TCP ports: - 18233 on Testnet If you configure Zebra with a specific -[`listen_addr`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.listen_addr), +[`listen_addr`](https://docs.rs/zebra_network/latest/zebra_network/struct.Config.html#structfield.listen_addr), it will advertise this address to other nodes for inbound connections. Outbound connections are required to sync, inbound connections are optional. Zebra also needs access to the Zcash DNS seeders, via the OS DNS resolver (usually port diff --git a/book/src/user/run.md b/book/src/user/run.md index 8d383db60f6..f308b8020f1 100644 --- a/book/src/user/run.md +++ b/book/src/user/run.md @@ -7,14 +7,15 @@ changing the config. The configuration format is the TOML encoding of the internal config structure, and documentation for all of the config options can be found -[here](https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html). +[here](https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html). - `zebrad start` starts a full node. You can run Zebra as a: -- [`lightwalletd` backend](https://zebra.zfnd.org/user/lightwalletd.html), and -- experimental [mining backend](https://zebra.zfnd.org/user/mining.html). +- [`lightwalletd` backend](https://zebra.zfnd.org/user/lightwalletd.html), +- [mining backend](https://zebra.zfnd.org/user/mining.html), or +- experimental [Sapling shielded transaction scanner](https://zebra.zfnd.org/user/shielded-scan.html). 
 ## Supported versions
 
diff --git a/book/src/user/shielded-scan-grpc-server.md b/book/src/user/shielded-scan-grpc-server.md
new file mode 100644
index 00000000000..2b3fca56b3f
--- /dev/null
+++ b/book/src/user/shielded-scan-grpc-server.md
@@ -0,0 +1,148 @@
+# Zebra Shielded Scanning gRPC Server
+
+## Get Started
+
+### Setup
+
+After setting up [Zebra Shielded Scanning](https://zebra.zfnd.org/user/shielded-scan.html), add a `listen_addr` field to the shielded-scan configuration:
+
+```toml
+[shielded_scan]
+listen_addr = "127.0.0.1:8231"
+```
+
+Then, run `zebrad` to start the scan gRPC server.
+
+Making requests to the server will also require a gRPC client; the examples here use `grpcurl`, though any gRPC client should work.
+
+[See installation instructions for `grpcurl` here](https://github.com/fullstorydev/grpcurl?tab=readme-ov-file#installation).
+
+The types can be accessed through the `zebra-grpc` crate's root `scanner` module for clients in a Rust environment, and the [`scanner.proto` file here](https://github.com/ZcashFoundation/zebra/blob/main/zebra-grpc/proto/scanner.proto) can be used to build types in other environments.
+
+### Usage
+
+To check that the gRPC server is running, try calling `scanner.Scanner/GetInfo`, for example with `grpcurl`:
+
+```bash
+grpcurl -plaintext '127.0.0.1:8231' scanner.Scanner/GetInfo
+```
+
+The response should look like:
+
+```
+{
+  "minSaplingBirthdayHeight": 419200
+}
+```
+
+An example request to the `Scan` method with `grpcurl` would look like:
+
+```bash
+grpcurl -plaintext -d '{ "keys": { "key": ["sapling_extended_full_viewing_key"] } }' '127.0.0.1:8231' scanner.Scanner/Scan
+```
+
+This will start scanning for transactions in Zebra's state and in new blocks as they're validated.
+
+Or, to use the scanner gRPC server without streaming, try calling `RegisterKeys` with your Sapling extended full viewing key, waiting for the scanner to cache some results, then calling `GetResults`:
+
+```bash
+grpcurl -plaintext -d '{ "keys": { "key": ["sapling_extended_full_viewing_key"] } }' '127.0.0.1:8231' scanner.Scanner/RegisterKeys
+grpcurl -plaintext -d '{ "keys": ["sapling_extended_full_viewing_key"] }' '127.0.0.1:8231' scanner.Scanner/GetResults
+```
+
+## gRPC Reflection
+
+To see all of the provided methods with `grpcurl`, try:
+
+```bash
+grpcurl -plaintext '127.0.0.1:8231' list scanner.Scanner
+```
+
+This will list the paths to each method in the `Scanner` service:
+
+```
+scanner.Scanner.ClearResults
+scanner.Scanner.DeleteKeys
+scanner.Scanner.GetInfo
+scanner.Scanner.GetResults
+scanner.Scanner.RegisterKeys
+```
+
+To see the request and response types for a method, for example the `GetResults` method, try:
+
+```bash
+grpcurl -plaintext '127.0.0.1:8231' describe scanner.Scanner.GetResults \
+&& grpcurl -plaintext '127.0.0.1:8231' describe scanner.GetResultsRequest \
+&& grpcurl -plaintext '127.0.0.1:8231' describe scanner.GetResultsResponse \
+&& grpcurl -plaintext '127.0.0.1:8231' describe scanner.Results \
+&& grpcurl -plaintext '127.0.0.1:8231' describe scanner.Transactions \
+&& grpcurl -plaintext '127.0.0.1:8231' describe scanner.Transaction
+```
+
+The response should be the request and response types for the `GetResults` method:
+
+```
+scanner.Scanner.GetResults is a method:
+// Get all data we have stored for the given keys.
+rpc GetResults ( .scanner.GetResultsRequest ) returns ( .scanner.GetResultsResponse );
+scanner.GetResultsRequest is a message:
+// A request for getting results for a set of keys.
+message GetResultsRequest {
+    // Keys for which to get results.
+    repeated string keys = 1;
+}
+scanner.GetResultsResponse is a message:
+// A set of responses for each provided key of a GetResults call.
+message GetResultsResponse {
+    // Results for each key.
+    map<string, Results> results = 1;
+}
+scanner.Results is a message:
+// A result for a single key.
+message Results {
+    // A height, transaction id map
+    map<uint32, Transactions> by_height = 1;
+}
+scanner.Transactions is a message:
+// A vector of transaction hashes
+message Transactions {
+    // Transactions
+    repeated Transaction transactions = 1;
+}
+scanner.Transaction is a message:
+// Transaction data
+message Transaction {
+    // The transaction hash/id
+    string hash = 1;
+}
+```
+
+## Methods
+
+---
+
+#### GetInfo
+
+Returns basic information about the `zebra-scan` instance.
+
+#### RegisterKeys
+
+Starts scanning for a set of keys, with optional start heights, and caches the results.
+Cached results can later be retrieved by calling the `GetResults` or `Scan` methods.
+
+#### DeleteKeys
+
+Stops scanning transactions for a set of keys. Deletes the keys and their cached results from `zebra-scan`.
+
+#### GetResults
+
+Returns cached results for a set of keys.
+
+#### ClearResults
+
+Deletes any cached results for a set of keys.
+
+#### Scan
+
+Starts scanning for a set of keys and returns a stream of results.
diff --git a/book/src/user/shielded-scan.md b/book/src/user/shielded-scan.md
new file mode 100644
index 00000000000..5d926be1e75
--- /dev/null
+++ b/book/src/user/shielded-scan.md
@@ -0,0 +1,113 @@
+# Zebra Shielded Scanning
+
+This document describes Zebra's shielded scanning from a user's perspective.
+
+For now, we only support Sapling, and only store transaction IDs in the scanner results database.
+Ongoing development is tracked in issue [#7728](https://github.com/ZcashFoundation/zebra/issues/7728).
+
+## Important Security Warning
+
+Zebra's shielded scanning feature has known security issues. It is for experimental use only.
+
+Do not use regular or sensitive viewing keys with Zebra's experimental scanning
+feature. Do not use this feature on a shared machine. We suggest generating new
+keys for experimental use, or using publicly known keys.
+
+## Build & Install
+
+Use [Zebra 1.6.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.6.0)
+or greater, or the `main` branch to get the latest features, and enable the
+`shielded-scan` feature during the build. You can also use Rust's `cargo` to
+install the latest release:
+
+```bash
+cargo install --features shielded-scan --locked --git https://github.com/ZcashFoundation/zebra zebrad
+```
+
+The Zebra binary will be at `~/.cargo/bin/zebrad`, which should be in your `PATH`.
+
+## Configuration
+
+Generate a configuration file with the default settings:
+
+```bash
+zebrad generate -o ~/.config/zebrad.toml
+```
+
+In the generated `zebrad.toml` file, use:
+
+- the `[shielded_scan]` table for database settings, and
+- the `[shielded_scan.sapling_keys_to_scan]` table for diversifiable full viewing keys.
+
+Sapling diversifiable/extended full viewing key strings start with `zxviews`, as
+described in
+[ZIP-32](https://zips.z.cash/zip-0032#sapling-extended-full-viewing-keys).
+
+For example, to scan the block chain with the [public ZECpages viewing
+key](https://zecpages.com/boardinfo), use:
+
+```toml
+[shielded_scan.sapling_keys_to_scan]
+"zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz" = 419200
+```
+
+Where the number 419200 is the birthday of the key:
+- a birthday lower than the Sapling activation height defaults to the Sapling activation height.
+- a birthday greater than or equal to the Sapling activation height will start scanning at the provided height, improving scanner speed. (See the sketch at the end of this page.)
+
+## Scanning the Block Chain
+
+Simply run
+
+```bash
+zebrad
+```
+
+The scanning will start once Zebra syncs its state past the Sapling activation
+height. Scanning a synced state takes between 12 and 24 hours. The scanner looks
+for transactions containing Sapling notes with outputs decryptable by the
+provided viewing keys.
+
+You should see log messages in the output every 10 000 blocks scanned, similar
+to:
+
+```
+2023-12-16T12:14:41.526740Z  INFO zebra_scan::storage::db: Last scanned height for key number 0 is 435000, resuming at 435001
+2023-12-16T12:14:41.526745Z  INFO zebra_scan::storage::db: loaded Zebra scanner cache
+...
+2023-12-16T12:15:19.063796Z  INFO {zebrad="39830b0" net="Main"}: zebra_scan::scan: Scanning the blockchain for key 0, started at block 435001, now at block 440000, current tip 2330550
+...
+```
+
+The Zebra scanner will resume the task if your Zebra instance goes down for any
+reason. On a new start, Zebra will display:
+
+```
+Last scanned height for key number 0 is 1798000, resuming at 1798001
+```
+
+## Displaying Scanning Results
+
+An easy way to query the results is to use the
+[Scanning Results Reader](https://github.com/ZcashFoundation/zebra/tree/main/zebra-utils#scanning-results-reader).
+
+## Querying Raw Scanning Results
+
+A more advanced way to query results is to use the `ldb` tool, which requires a certain level of expertise.
+
+Install `ldb`:
+
+```bash
+sudo apt install rocksdb-tools
+```
+
+Run `ldb` with the scanner database:
+
+```bash
+ldb --db="$HOME/.cache/zebra/private-scan/v1/mainnet" --secondary_path= --column_family=sapling_tx_ids --hex scan
+```
+
+Some of the output will be markers the scanner uses to keep track of progress; the rest will be transactions the scanner has found.
+
+To learn more about how to filter the database, please refer to the [RocksDB Administration and Data Access Tool](https://github.com/facebook/rocksdb/wiki/Administration-and-Data-Access-Tool) documentation.
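+As a minimal sketch of the birthday rule above (hypothetical helper, not Zebra's actual API;
+the real logic lives in the `zebra-scan` crate):
+
+```rust
+/// Returns the height where scanning starts for a key with the given birthday.
+/// Hypothetical sketch: 419_200 is the Mainnet Sapling activation height.
+fn scan_start_height(birthday: u32) -> u32 {
+    const SAPLING_ACTIVATION_HEIGHT: u32 = 419_200;
+
+    // Birthdays below the activation height default to the activation height;
+    // later birthdays let the scanner skip blocks that can't contain the key's notes.
+    birthday.max(SAPLING_ACTIVATION_HEIGHT)
+}
+
+fn main() {
+    assert_eq!(scan_start_height(0), 419_200);
+    assert_eq!(scan_start_height(1_000_000), 1_000_000);
+}
+```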
diff --git a/book/src/user/startup.md b/book/src/user/startup.md
index d7e1faf7a8e..f1f7013f354 100644
--- a/book/src/user/startup.md
+++ b/book/src/user/startup.md
@@ -27,7 +27,7 @@ Some important parts of the config are:
 - `state.cache_dir`: where the cached state is stored on disk
 - `rpc.listen_addr`: optional JSON-RPC listener port
 
-See [the full list of configuration options](https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html).
+See [the full list of configuration options](https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html).
 
 ```
 zebrad::commands::start: Starting zebrad
@@ -129,10 +129,9 @@ Zebra also starts ongoing tasks to batch verify signatures and proofs.
 
 ```
 zebrad::commands::start: initializing verifiers
-init{config=Config { debug_skip_parameter_preload: false, ... } ... }: zebra_consensus::primitives::groth16::params: checking and loading Zcash Sapling and Sprout parameters
+init{config=Config { ... } ... }: zebra_consensus::primitives::groth16::params: checking and loading Zcash Sapling and Sprout parameters
 init{config=Config { checkpoint_sync: true, ... } ... }: zebra_consensus::chain: initializing chain verifier tip=None max_checkpoint_height=Height(1644839)
 ...
-init{config=Config { debug_skip_parameter_preload: false, ... } ... }: zebra_consensus::chain: Groth16 pre-download and check task finished
 ```
 
 ### Initialize Transaction Mempool
diff --git a/book/src/user/supported-platforms.md b/book/src/user/supported-platforms.md
index 152be56537e..cbb83cdc840 100644
--- a/book/src/user/supported-platforms.md
+++ b/book/src/user/supported-platforms.md
@@ -17,7 +17,7 @@ For the full requirements, see [Tier 1 platform policy](platform-tier-policy.md#
 
 | platform | os | notes | rust | artifacts
 | -------|-------|-------|-------|-------
-| `x86_64-unknown-linux-gnu` | [Debian 11](https://www.debian.org/releases/bullseye/) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | Docker
+| `x86_64-unknown-linux-gnu` | [Debian 12](https://www.debian.org/releases/bookworm/) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | Docker
 
 ## Tier 2
 
@@ -34,6 +34,7 @@ For the full requirements, see [Tier 2 platform policy](platform-tier-policy.md#
 | -------|-------|-------|-------|-------
 | `x86_64-unknown-linux-gnu` | [GitHub ubuntu-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A
 | `x86_64-unknown-linux-gnu` | [GitHub ubuntu-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest beta release](https://github.com/rust-lang/rust/blob/beta/src/version) | N/A
+| `x86_64-apple-darwin` | [GitHub macos-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A
 
 ## Tier 3
 
@@ -45,6 +46,5 @@ For the full requirements, see [Tier 3 platform policy](platform-tier-policy.md#
 
 | platform | os | notes | rust | artifacts
 | -------|-------|-------|-------|-------
-| `aarch64-unknown-linux-gnu` | [Debian 11](https://www.debian.org/releases/bullseye/) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A
-| `x86_64-apple-darwin` | [GitHub macos-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A
-
+| `aarch64-unknown-linux-gnu` | [Debian 12](https://www.debian.org/releases/bookworm/) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A
+| `aarch64-apple-darwin` | latest macOS | 64-bit, Apple M1 or M2 | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A
diff --git a/book/src/user/tracing.md b/book/src/user/tracing.md
index 9d502abff8c..d1b984f05df 100644
--- a/book/src/user/tracing.md
+++ b/book/src/user/tracing.md
@@ -36,10 +36,10 @@ and the [`flamegraph`][flamegraph] runtime config option.
 
 Compile Zebra with `--features sentry` to monitor it using [Sentry][sentry] in production.
-[tracing_section]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html -[filter]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html#structfield.filter -[flamegraph]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html#structfield.flamegraph +[tracing_section]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html +[filter]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.filter +[flamegraph]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.flamegraph [flamegraphs]: http://www.brendangregg.com/flamegraphs.html [systemd_journald]: https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -[use_journald]: https://doc.zebra.zfnd.org/zebrad/components/tracing/struct.Config.html#structfield.use_journald +[use_journald]: https://docs.rs/zebrad/latest/zebrad/components/tracing/struct.Config.html#structfield.use_journald [sentry]: https://sentry.io/welcome/ diff --git a/book/src/user/troubleshooting.md b/book/src/user/troubleshooting.md index b1a89d43576..8a92125fa29 100644 --- a/book/src/user/troubleshooting.md +++ b/book/src/user/troubleshooting.md @@ -7,7 +7,7 @@ for: - macOS, - Ubuntu, - Docker: - - Debian Bullseye. + - Debian Bookworm. ## Memory Issues @@ -48,7 +48,7 @@ Make sure you're using a release build on your native architecture. ### Syncer Lookahead Limit If your connection is slow, try -[downloading fewer blocks at a time](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.lookahead_limit): +[downloading fewer blocks at a time](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html#structfield.lookahead_limit): ```toml [sync] @@ -58,7 +58,7 @@ max_concurrent_block_requests = 25 ### Peer Set Size -If your connection is slow, try [connecting to fewer peers](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): +If your connection is slow, try [connecting to fewer peers](https://docs.rs/zebra-network/latest/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): ```toml [network] diff --git a/book/theme/css/custom.css b/book/theme/css/custom.css index 62fb70f38ea..7ddd666208b 100644 --- a/book/theme/css/custom.css +++ b/book/theme/css/custom.css @@ -12,4 +12,12 @@ html.navy #service-dep-diagram, html.ayu #service-dep-diagram, html.coal #service-dep-diagram { filter: invert(1); -} \ No newline at end of file +} + +h2, +h3, +h4, +h5, +h6 { + display: inline-block; +} diff --git a/deny.toml b/deny.toml index fcb321d3a67..b0b03fb5a75 100644 --- a/deny.toml +++ b/deny.toml @@ -67,18 +67,32 @@ skip-tree = [ # this duplicate dependency currently only exists in testing builds { name = "regex-syntax", version = "=0.6.29" }, + # wait for `parking_lot` crate inside `jsonrpc-http-server` to upgrade + { name = "redox_syscall", version = "=0.2.16" }, + + # wait for `color-eyre` to upgrade + { name = "owo-colors", version = "=3.5.0" }, + + # wait for `serde_with_macros` to upgrade `darling_core` + { name = "strsim", version = "=0.10.0" }, + # ZF crates # wait for indexmap, toml_edit, serde_json, tower to upgrade { name = "hashbrown", version = "=0.12.3" }, + # wait for zcash_script to upgrade + { name = "metrics", version = "=0.21.1" }, + # ECC crates - # wait for minreq and zcash_proofs to upgrade - { name = "rustls", version = "=0.20.9" }, + # wait for hdwallet to upgrade + { name = "ring", version = "=0.16.20" 
}, - # wait for zcash_proofs to upgrade - { name = "webpki-roots", version = "=0.22.6" }, + # wait for the equihash/solver feature to merge + # https://github.com/zcash/librustzcash/pull/1083 + # https://github.com/zcash/librustzcash/pull/1088 + { name = "equihash", version = "=0.2.0" }, # zebra-utils dependencies @@ -86,6 +100,12 @@ skip-tree = [ { name = "clap", version = "=2.34.0" }, { name = "heck", version = "=0.3.3" }, + # wait for abscissa_core to upgrade + {name = "tracing-log", version = "=0.1.4" }, + + # wait for prost to upgrade + {name = "itertools", version = "=0.11.0" }, + # Test-only dependencies # wait for tokio-test -> tokio-stream to upgrade @@ -98,6 +118,10 @@ skip-tree = [ # wait for proptest's rusty-fork dependency to upgrade quick-error { name = "quick-error", version = "=1.2.3" }, + # wait for console-subscriber to update tonic. + { name = "tonic", version = "=0.10.2" }, + { name = "tonic-build", version = "=0.10.2" }, + # Optional dependencies # upgrade abscissa (required dependency) and arti (optional dependency) @@ -124,6 +148,10 @@ unknown-git = "deny" allow-registry = ["https://github.com/rust-lang/crates.io-index"] # List of URLs for allowed Git repositories allow-git = [ + # TODO: remove this after the equihash solver branch is merged and released. + # + # "cargo deny" will log a warning in builds without the internal-miner feature. That's ok. + "https://github.com/ZcashFoundation/librustzcash.git" ] [sources.allow-org] diff --git a/docker/.env b/docker/.env new file mode 100644 index 00000000000..2d96240f23e --- /dev/null +++ b/docker/.env @@ -0,0 +1,33 @@ +RUST_LOG=info +# This variable forces the use of color in the logs +ZEBRA_FORCE_USE_COLOR=1 +LOG_COLOR=true + +### +# Configuration Variables +# These variables are used to configure the zebra node +# Check the entrypoint.sh script for more details +### + +# The config file full path used in the Dockerfile. +ZEBRA_CONF_PATH=/etc/zebrad/zebrad.toml +# [network] +NETWORK=Mainnet +ZEBRA_LISTEN_ADDR=0.0.0.0 +# [consensus] +ZEBRA_CHECKPOINT_SYNC=true +# [state] +# Set this to change the default cached state directory +ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache +LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache +# [metrics] +METRICS_ENDPOINT_ADDR=0.0.0.0 +METRICS_ENDPOINT_PORT=9999 +# [tracing] +TRACING_ENDPOINT_ADDR=0.0.0.0 +TRACING_ENDPOINT_PORT=3000 +# [rpc] +RPC_LISTEN_ADDR=0.0.0.0 +# if ${RPC_PORT} is not set, it will use the default value for the current network +RPC_PORT=8232 + diff --git a/docker/Dockerfile b/docker/Dockerfile index 8966ff12c9a..c46da1235c0 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -17,9 +17,10 @@ # https://github.com/ZcashFoundation/zebra/settings/variables/actions ARG FEATURES="default-release-binaries" ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" +ARG EXPERIMENTAL_FEATURES="" # This stage implements cargo-chef for docker layer caching -FROM rust:bullseye as chef +FROM rust:bookworm as chef RUN cargo install cargo-chef --locked WORKDIR /opt/zebrad @@ -48,6 +49,7 @@ RUN apt-get -qq update && \ ca-certificates \ protobuf-compiler \ rsync \ + rocksdb-tools \ ; \ rm -rf /var/lib/apt/lists/* /tmp/* @@ -95,9 +97,7 @@ ENV CARGO_HOME="/opt/zebrad/.cargo/" # We also download needed dependencies for tests to work, from other images. # An entrypoint.sh is only available in this step for easier test handling with variables. 
FROM deps AS tests -# TODO: do not hardcode the user /root/ even though is a safe assumption -# Pre-download Zcash Sprout, Sapling parameters and Lightwalletd binary -COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/zcash-params:edge /root/.zcash-params /root/.zcash-params + COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/lightwalletd:edge /opt/lightwalletd /usr/local/bin # cargo uses timestamps for its cache, so they need to be in this order: @@ -112,6 +112,8 @@ ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} # separately from the test and production image builds. ARG FEATURES ARG TEST_FEATURES +ARG EXPERIMENTAL_FEATURES +# TODO: add empty $EXPERIMENTAL_FEATURES when we can avoid adding an extra space to the end of the string ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" # Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage, @@ -123,7 +125,6 @@ ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" # # TODO: add --locked when cargo-chef supports it RUN cargo chef cook --tests --release --features "${ENTRYPOINT_FEATURES}" --workspace --recipe-path recipe.json - # Undo the source file changes made by cargo-chef. # rsync invalidates the cargo cache for the changed files only, by updating their timestamps. # This makes sure the fake empty binaries created by cargo-chef are rebuilt. @@ -141,6 +142,9 @@ RUN chmod u+x /entrypoint.sh # Entrypoint environment variables ENV ENTRYPOINT_FEATURES=${ENTRYPOINT_FEATURES} +# We repeat the ARGs here, so they are available in the entrypoint.sh script for $RUN_ALL_EXPERIMENTAL_TESTS +ARG EXPERIMENTAL_FEATURES="shielded-scan journald prometheus filter-reload" +ENV ENTRYPOINT_FEATURES_EXPERIMENTAL="${ENTRYPOINT_FEATURES} ${EXPERIMENTAL_FEATURES}" # By default, runs the entrypoint tests specified by the environmental variables (if any are set) ENTRYPOINT [ "/entrypoint.sh" ] @@ -176,15 +180,16 @@ RUN chmod u+x /entrypoint.sh # This stage is only used when deploying nodes or when only the resulting zebrad binary is needed # # To save space, this step starts from scratch using debian, and only adds the resulting -# binary from the `release` stage, and the Zcash Sprout & Sapling parameters from ZCash -FROM debian:bullseye-slim AS runtime +# binary from the `release` stage +FROM debian:bookworm-slim AS runtime COPY --from=release /opt/zebrad/target/release/zebrad /usr/local/bin COPY --from=release /entrypoint.sh / -COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/zcash-params:edge /root/.zcash-params /root/.zcash-params RUN apt-get update && \ apt-get install -y --no-install-recommends \ - ca-certificates + ca-certificates \ + curl \ + rocksdb-tools # Config settings for zebrad ARG FEATURES diff --git a/docker/docker-compose.lwd.yml b/docker/docker-compose.lwd.yml new file mode 100644 index 00000000000..7d8c56b1855 --- /dev/null +++ b/docker/docker-compose.lwd.yml @@ -0,0 +1,59 @@ +version: "3.8" + +services: + zebra: + ports: + - "8232:8232" # Opens an RPC endpoint (for lightwalletd and mining) + healthcheck: + start_period: 1m + interval: 15s + timeout: 10s + retries: 3 + test: ["CMD-SHELL", "curl --data-binary '{\"id\":\"curltest\", \"method\": \"getinfo\"}' -H 'content-type: application/json' 127.0.0.1:8232 || exit 1"] + + lightwalletd: + image: electriccoinco/lightwalletd + platform: linux/amd64 + depends_on: + zebra: + condition: service_started + restart: unless-stopped + deploy: + resources: + reservations: + cpus: "4" + memory: 16G + environment: + - LWD_GRPC_PORT=9067 + - 
LWD_HTTP_PORT=9068 + configs: + - source: lwd_config + target: /etc/lightwalletd/zcash.conf + uid: '2002' # Golang's container default user uid + gid: '2002' # Golang's container default group gid + mode: 0440 + volumes: + - litewalletd-data:/var/lib/lightwalletd/db + #! This setup with --no-tls-very-insecure is only for testing purposes + #! For production environments follow the guidelines here: https://github.com/zcash/lightwalletd#production-usage + command: > + --no-tls-very-insecure + --grpc-bind-addr=0.0.0.0:9067 + --http-bind-addr=0.0.0.0:9068 + --zcash-conf-path=/etc/lightwalletd/zcash.conf + --data-dir=/var/lib/lightwalletd/db + --log-file=/dev/stdout + --log-level=7 + ports: + - "9067:9067" # gRPC + - "9068:9068" # HTTP + +configs: + lwd_config: + # Change the following line to point to a zcash.conf on your host machine + # to allow for easy configuration changes without rebuilding the image + file: ./zcash-lightwalletd/zcash.conf + +volumes: + litewalletd-data: + driver: local diff --git a/docker/docker-compose.test.yml b/docker/docker-compose.test.yml new file mode 100644 index 00000000000..fac94e3f4db --- /dev/null +++ b/docker/docker-compose.test.yml @@ -0,0 +1,36 @@ +version: "3.8" + +services: + zebra: + build: + context: ../ + dockerfile: docker/Dockerfile + target: tests + restart: unless-stopped + deploy: + resources: + reservations: + cpus: "4" + memory: 16G + # Change this to the command you want to run, respecting the entrypoint.sh + # For example, to run the tests, use the following command: + # command: ["cargo", "test", "--locked", "--release", "--features", "${TEST_FEATURES}", "--package", "zebrad", "--test", "acceptance", "--", "--nocapture", "--include-ignored", "sync_large_checkpoints_"] + volumes: + - zebrad-cache:/var/cache/zebrad-cache + - lwd-cache:/var/cache/lwd-cache + ports: + # Zebra uses the following inbound and outbound TCP ports + - "8232:8232" # Opens an RPC endpoint (for wallet storing and mining) + - "8233:8233" # Mainnet Network (for peer connections) + - "18233:18233" # Testnet Network + # - "9999:9999" # Metrics + # - "3000:3000" # Tracing + env_file: + - test.env + +volumes: + zebrad-cache: + driver: local + + lwd-cache: + driver: local diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 00000000000..9264fd406d4 --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,49 @@ +version: "3.8" + +services: + zebra: + image: zfnd/zebra + platform: linux/amd64 + build: + context: ../ + dockerfile: docker/Dockerfile + target: runtime + restart: unless-stopped + deploy: + resources: + reservations: + cpus: "4" + memory: 16G + env_file: + - .env + logging: + options: + max-size: "10m" + max-file: "5" + #! Uncomment the `configs` mapping below to use the `zebrad.toml` config file from the host machine + #! 
NOTE: This will override the zebrad.toml in the image and make some variables irrelevant + # configs: + # - source: zebra_config + # target: /etc/zebrad/zebrad.toml + # uid: '2001' # Rust's container default user uid + # gid: '2001' # Rust's container default group gid + # mode: 0440 + volumes: + - zebrad-cache:/var/cache/zebrad-cache + ports: + # Zebra uses the following default inbound and outbound TCP ports + - "8233:8233" # Mainnet Network (for peer connections) + # - "8232:8232" # Opens an RPC endpoint (for wallet storing and mining) + # - "18233:18233" # Testnet Network + # - "9999:9999" # Metrics + # - "3000:3000" # Tracing + +configs: + zebra_config: + # Change the following line to point to a zebrad.toml on your host machine + # to allow for easy configuration changes without rebuilding the image + file: ../zebrad/tests/common/configs/v1.0.0-rc.2.toml/ + +volumes: + zebrad-cache: + driver: local diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 1ffc0c0ba60..ac7ffbff4a9 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -58,6 +58,9 @@ fi #### : "${RUN_ALL_TESTS:=}" +: "${RUN_ALL_EXPERIMENTAL_TESTS:=}" +: "${TEST_FAKE_ACTIVATION_HEIGHTS:=}" +: "${TEST_ZEBRA_EMPTY_SYNC:=}" : "${ZEBRA_TEST_LIGHTWALLETD:=}" : "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES:=}" : "${FULL_SYNC_TESTNET_TIMEOUT_MINUTES:=}" @@ -73,6 +76,7 @@ fi : "${TEST_LWD_TRANSACTIONS:=}" : "${TEST_GET_BLOCK_TEMPLATE:=}" : "${TEST_SUBMIT_BLOCK:=}" +: "${TEST_SCAN_START_WHERE_LEFT:=}" : "${ENTRYPOINT_FEATURES:=}" # Configuration file path @@ -220,12 +224,27 @@ case "$1" in # For these tests, we activate the test features to avoid recompiling `zebrad`, # but we don't actually run any gRPC tests. if [[ "${RUN_ALL_TESTS}" -eq "1" ]]; then - # Run all the available tests for the current environment. - # If the lightwalletd environmental variables are set, we will also run those tests. - exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored + # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. + # If the lightwalletd environmental variables are set, we will also run those tests. + exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored + + elif [[ "${RUN_ALL_EXPERIMENTAL_TESTS}" -eq "1" ]]; then + # Run unit, basic acceptance tests, and ignored tests with experimental features. + # If the lightwalletd environmental variables are set, we will also run those tests. + exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES_EXPERIMENTAL}" --workspace -- --nocapture --include-ignored + + elif [[ "${TEST_FAKE_ACTIVATION_HEIGHTS}" -eq "1" ]]; then + # Run state tests with fake activation heights. + exec cargo test --locked --release --features "zebra-test" --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights + + elif [[ "${TEST_ZEBRA_EMPTY_SYNC}" -eq "1" ]]; then + # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. + run_cargo_test "${ENTRYPOINT_FEATURES}" "sync_large_checkpoints_" + + elif [[ "${ZEBRA_TEST_LIGHTWALLETD}" -eq "1" ]]; then + # Test launching lightwalletd with an empty lightwalletd and Zebra state. + run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_integration" - # For these tests, we activate the gRPC feature to avoid recompiling `zebrad`, - # but we don't actually run any gRPC tests. 
elif [[ -n "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES}" ]]; then # Run a Zebra full sync test on mainnet. run_cargo_test "${ENTRYPOINT_FEATURES}" "full_sync_mainnet" @@ -326,6 +345,11 @@ case "$1" in check_directory_files "${ZEBRA_CACHED_STATE_DIR}" run_cargo_test "${ENTRYPOINT_FEATURES}" "submit_block" + elif [[ "${TEST_SCAN_START_WHERE_LEFT}" -eq "1" ]]; then + # Test that the scanner can continue scanning where it was left when zebrad restarts. + check_directory_files "${ZEBRA_CACHED_STATE_DIR}" + run_cargo_test "shielded-scan" "scan_start_where_left" + else exec "$@" fi diff --git a/docker/test.env b/docker/test.env new file mode 100644 index 00000000000..fd2a7c876b7 --- /dev/null +++ b/docker/test.env @@ -0,0 +1,60 @@ +### +# Configuration Variables +# These variables are used to configure the zebra node +# Check the entrypoint.sh script for more details +### + +# Set this to change the default log level (must be set at build time) +RUST_LOG=info +# This variable forces the use of color in the logs +ZEBRA_FORCE_USE_COLOR=1 +LOG_COLOR=true +# Path to the config file. This variable has a default set in entrypoint.sh +# ZEBRA_CONF_PATH=/etc/zebrad/zebrad.toml +# [network] +NETWORK=Mainnet +# [state] +# Set this to change the default cached state directory +ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache +LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache +# [tracing] +LOG_COLOR=false +TRACING_ENDPOINT_ADDR=0.0.0.0 +TRACING_ENDPOINT_PORT=3000 + +#### +# Test Variables +# These variables are used to run tests in the Dockerfile +# Check the entrypoint.sh script for more details +#### + +# Unit tests +# TODO: These variables are evaluated to any value, even setting a NULL value will evaluate to true +# TEST_FAKE_ACTIVATION_HEIGHTS= +# ZEBRA_SKIP_NETWORK_TESTS +# ZEBRA_SKIP_IPV6_TESTS +RUN_ALL_TESTS= +RUN_ALL_EXPERIMENTAL_TESTS= +TEST_ZEBRA_EMPTY_SYNC= +ZEBRA_TEST_LIGHTWALLETD= +# Integration Tests +# Most of these tests require a cached state directory to save the network state +TEST_DISK_REBUILD= +# These tests needs a Zebra cached state +TEST_CHECKPOINT_SYNC= +GENERATE_CHECKPOINTS_MAINNET= +GENERATE_CHECKPOINTS_TESTNET= +TEST_UPDATE_SYNC= +# These tests need a Lightwalletd binary + a Zebra cached state +TEST_LWD_RPC_CALL= +TEST_GET_BLOCK_TEMPLATE= +TEST_SUBMIT_BLOCK= +# These tests need a Lightwalletd binary + Lightwalletd cached state + a Zebra cached state +TEST_LWD_UPDATE_SYNC= +TEST_LWD_GRPC= +TEST_LWD_TRANSACTIONS= +# Full sync tests +# These tests could take a long time to run, depending on the network +FULL_SYNC_MAINNET_TIMEOUT_MINUTES= +FULL_SYNC_TESTNET_TIMEOUT_MINUTES= +TEST_LWD_FULL_SYNC= diff --git a/docker/zcash-lightwalletd/Dockerfile b/docker/zcash-lightwalletd/Dockerfile index 06ddcbfbd04..7b9d08194d2 100644 --- a/docker/zcash-lightwalletd/Dockerfile +++ b/docker/zcash-lightwalletd/Dockerfile @@ -42,7 +42,7 @@ CMD ["--no-tls-very-insecure", "--grpc-bind-addr=0.0.0.0:9067", "--http-bind-ad ## ## Deploy ## -FROM debian:bullseye-slim as runtime +FROM debian:bookworm-slim as runtime ARG ZCASHD_CONF_PATH # Maintain backward compatibility with mainstream repo using this ARGs in docker-compose diff --git a/docker/zcash-lightwalletd/zcash.conf b/docker/zcash-lightwalletd/zcash.conf new file mode 100644 index 00000000000..dc2a0238fc5 --- /dev/null +++ b/docker/zcash-lightwalletd/zcash.conf @@ -0,0 +1,5 @@ +rpcbind=zebra +rpcport=8232 +rpcuser=zcashrpc +rpcpassword=changeme +testnet=0 diff --git a/docker/zcash-params/Dockerfile b/docker/zcash-params/Dockerfile deleted file mode 100644 index 
dce8153d185..00000000000 --- a/docker/zcash-params/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# This image is for caching Zcash Sprout and Sapling parameters. -# We don't test it automatically in CI due to download server rate-limiting. -# To manually run it on the PR branch before merging, go to: -# https://github.com/ZcashFoundation/zebra/actions/workflows/zcash-params.yml - -FROM debian:bullseye-slim AS release - -# Just use the precompiled zebrad binary from a recent release image. -# -# It doesn't matter what build or commit of Zebra we use, because it just calls into the -# zcash_proofs download code. (Which doesn't change much.) -# Test image zebrad binaries would also work, but it's harder to get a recent tag for them. -# -# Compiling the download-params example using `cargo ` is another alternative: -# `cargo run --locked --release --features default-docker --example download-params` -COPY --from=zfnd/zebra:latest /usr/local/bin/zebrad /usr/local/bin - -# Pre-download Zcash Sprout and Sapling parameters -RUN zebrad download diff --git a/release.toml b/release.toml index 0cbcdd51772..bc47505b8f1 100644 --- a/release.toml +++ b/release.toml @@ -17,4 +17,4 @@ push = false tag = false # Owners for new crates -owners = [ 'dconnolly', 'teor2345', 'zcashfoundation/owners' ] +owners = [ 'oxarbitrage', 'teor2345', 'zcashfoundation/owners' ] diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 7b63bc9e0ef..fb676a5bad9 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.6" +version = "0.2.41-beta.11" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal @@ -22,12 +22,12 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -futures = "0.3.28" +futures = "0.3.30" futures-core = "0.3.28" -pin-project = "1.1.3" -rayon = "1.7.0" -tokio = { version = "1.33.0", features = ["time", "sync", "tracing", "macros"] } -tokio-util = "0.7.8" +pin-project = "1.1.4" +rayon = "1.8.1" +tokio = { version = "1.36.0", features = ["time", "sync", "tracing", "macros"] } +tokio-util = "0.7.10" tower = { version = "0.4.13", features = ["util", "buffer"] } tracing = "0.1.39" tracing-futures = "0.2.5" @@ -41,10 +41,9 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } ed25519-zebra = "4.0.3" rand = "0.8.5" -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.3" -tower-fallback = { path = "../tower-fallback/" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.11" } tower-test = "0.4.0" -zebra-consensus = { path = "../zebra-consensus/" } -zebra-test = { path = "../zebra-test/" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.35" } diff --git a/tower-batch-control/LICENSE b/tower-batch-control/LICENSE index 9862976a6ce..5428318519b 100644 --- a/tower-batch-control/LICENSE +++ b/tower-batch-control/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019-2024 Zcash Foundation Copyright (c) 2019 Tower Contributors Permission is hereby granted, free of charge, to any diff --git a/tower-batch-control/tests/ed25519.rs b/tower-batch-control/tests/ed25519.rs index 773b1e3e017..5bbee2f72fb 100644 --- a/tower-batch-control/tests/ed25519.rs +++ b/tower-batch-control/tests/ed25519.rs @@ 
-1,18 +1,172 @@ //! Test batching using ed25519 verification. -use std::time::Duration; +use std::{ + mem, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use color_eyre::{eyre::eyre, Report}; -use ed25519_zebra::*; +use ed25519_zebra::{batch, Error, SigningKey, VerificationKeyBytes}; use futures::stream::{FuturesOrdered, StreamExt}; +use futures::FutureExt; +use futures_core::Future; use rand::thread_rng; +use tokio::sync::{oneshot::error::RecvError, watch}; use tower::{Service, ServiceExt}; -use tower_batch_control::Batch; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; // ============ service impl ============ -use zebra_consensus::ed25519::{Item as Ed25519Item, Verifier as Ed25519Verifier}; +/// A boxed [`std::error::Error`]. +type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>; + +/// The type of the batch verifier. +type BatchVerifier = batch::Verifier; + +/// The type of verification results. +type VerifyResult = Result<(), Error>; + +/// The type of the batch sender channel. +type Sender = watch::Sender<Option<VerifyResult>>; + +/// The type of the batch item. +/// This is an `Ed25519Item`. +type Item = batch::Item; + +/// Ed25519 signature verifier service +struct Verifier { + /// A batch verifier for ed25519 signatures. + batch: BatchVerifier, + + /// A channel for broadcasting the result of a batch to the futures for each batch item. + /// + /// Each batch gets a newly created channel, so there is only ever one result sent per channel. + /// Tokio doesn't have a oneshot multi-consumer channel, so we use a watch channel. + tx: Sender, +} + +impl Default for Verifier { + fn default() -> Self { + let batch = BatchVerifier::default(); + let (tx, _) = watch::channel(None); + Self { batch, tx } + } +} + +impl Verifier { + /// Returns the batch verifier and channel sender from `self`, + /// replacing them with a new empty batch. + fn take(&mut self) -> (BatchVerifier, Sender) { + // Use a new verifier and channel for each batch. + let batch = mem::take(&mut self.batch); + + let (tx, _) = watch::channel(None); + let tx = mem::replace(&mut self.tx, tx); + + (batch, tx) + } + + /// Synchronously process the batch, and send the result using the channel sender. + /// This function blocks until the batch is completed. + fn verify(batch: BatchVerifier, tx: Sender) { + let result = batch.verify(thread_rng()); + let _ = tx.send(Some(result)); + } + + /// Flush the batch using a thread pool, and return the result via the channel. + /// This returns immediately, usually before the batch is completed. + fn flush_blocking(&mut self) { + let (batch, tx) = self.take(); + + // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. + // + // We don't care about execution order here, because this method is only called on drop. + tokio::task::block_in_place(|| rayon::spawn_fifo(|| Self::verify(batch, tx))); + } + + /// Flush the batch using a thread pool, and return the result via the channel. + /// This function returns a future that becomes ready when the batch is completed. + async fn flush_spawning(batch: BatchVerifier, tx: Sender) { + // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures.
+ let _ = tx.send(spawn_fifo(move || batch.verify(thread_rng())).await.ok()); + } +} + +impl Service<BatchControl<Item>> for Verifier { + type Response = (); + type Error = BoxError; + type Future = Pin<Box<dyn Future<Output = Result<(), BoxError>> + Send + 'static>>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: BatchControl<Item>) -> Self::Future { + match req { + BatchControl::Item(item) => { + tracing::trace!("got ed25519 item"); + self.batch.queue(item); + let mut rx = self.tx.subscribe(); + + Box::pin(async move { + match rx.changed().await { + Ok(()) => { + // We use a new channel for each batch, + // so we always get the correct batch result here. + let result = rx.borrow() + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")?; + + if result.is_ok() { + tracing::trace!(?result, "validated ed25519 signature"); + } else { + tracing::trace!(?result, "invalid ed25519 signature"); + } + result.map_err(BoxError::from) + } + Err(_recv_error) => panic!("ed25519 verifier was dropped without flushing"), + } + }) + } + + BatchControl::Flush => { + tracing::trace!("got ed25519 flush command"); + + let (batch, tx) = self.take(); + + Box::pin(Self::flush_spawning(batch, tx).map(Ok)) + } + } + } +} + +impl Drop for Verifier { + fn drop(&mut self) { + // We need to flush the current batch in case there are still any pending futures. + // This returns immediately, usually before the batch is completed. + self.flush_blocking(); + } +} + +/// Fires off a task into the Rayon threadpool and awaits the result through a oneshot channel. +async fn spawn_fifo< + E: 'static + std::error::Error + Sync + Send, + F: 'static + FnOnce() -> Result<(), E> + Send, +>( + f: F, +) -> Result<Result<(), E>, RecvError> { + // Rayon doesn't have a spawn function that returns a value, + // so we use a oneshot channel instead. + let (rsp_tx, rsp_rx) = tokio::sync::oneshot::channel(); + + rayon::spawn_fifo(move || { + let _ = rsp_tx.send(f()); + }); + + rsp_rx.await +} // =============== testing code ======== @@ -22,7 +176,7 @@ async fn sign_and_verify( bad_index: Option<usize>, ) -> Result<(), V::Error> where - V: Service<Ed25519Item, Response = ()>, + V: Service<Item, Response = ()>, { let mut results = FuturesOrdered::new(); for i in 0..n { @@ -61,7 +215,7 @@ async fn batch_flushes_on_max_items() -> Result<(), Report> { // flushing is happening based on hitting max_items. // // Create our own verifier, so we don't shut down a shared verifier used by other tests. - let verifier = Batch::new(Ed25519Verifier::default(), 10, 5, Duration::from_secs(1000)); + let verifier = Batch::new(Verifier::default(), 10, 5, Duration::from_secs(1000)); timeout(Duration::from_secs(1), sign_and_verify(verifier, 100, None)) .await .map_err(|e| eyre!(e))? @@ -79,12 +233,7 @@ async fn batch_flushes_on_max_latency() -> Result<(), Report> { // flushing is happening based on hitting max_latency. // // Create our own verifier, so we don't shut down a shared verifier used by other tests. - let verifier = Batch::new( - Ed25519Verifier::default(), - 100, - 10, - Duration::from_millis(500), - ); + let verifier = Batch::new(Verifier::default(), 100, 10, Duration::from_millis(500)); timeout(Duration::from_secs(1), sign_and_verify(verifier, 10, None)) .await .map_err(|e| eyre!(e))? @@ -99,13 +248,8 @@ async fn fallback_verification() -> Result<(), Report> { // Create our own verifier, so we don't shut down a shared verifier used by other tests.
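The `spawn_fifo` helper above is the bridge between rayon's fire-and-forget API and async callers: the closure's result comes back through a oneshot channel. A minimal usage sketch under the same imports (this wrapper function is hypothetical, not part of the diff):

// Run a CPU-bound ed25519 batch check on the rayon pool and await its result.
async fn verify_on_rayon_pool(batch: BatchVerifier) -> Result<(), BoxError> {
    let result = spawn_fifo(move || batch.verify(thread_rng())).await;

    // The outer error means the rayon task dropped the sender before sending.
    result.map_err(BoxError::from)?.map_err(BoxError::from)
}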
let verifier = Fallback::new( - Batch::new( - Ed25519Verifier::default(), - 10, - 1, - Duration::from_millis(100), - ), - tower::service_fn(|item: Ed25519Item| async move { item.verify_single() }), + Batch::new(Verifier::default(), 10, 1, Duration::from_millis(100)), + tower::service_fn(|item: Item| async move { item.verify_single() }), ); sign_and_verify(verifier, 100, Some(39)) diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 4aa8a698d6a..cfefb8337ac 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.6" +version = "0.2.41-beta.11" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." license = "MIT OR Apache-2.0" @@ -16,12 +16,12 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -pin-project = "1.1.3" +pin-project = "1.1.4" tower = "0.4.13" futures-core = "0.3.28" tracing = "0.1.39" [dev-dependencies] -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.35" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 60a9785322d..3e714ccd63b 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -29,11 +29,22 @@ async-error = [ "tokio", ] -# Experimental mining RPC support +# Mining RPC support getblocktemplate-rpcs = [ "zcash_address", ] +# Experimental shielded scanning support +shielded-scan = [ + "zcash_client_backend" +] + +# Experimental internal miner support +# TODO: Internal miner feature functionality was removed at https://github.com/ZcashFoundation/zebra/issues/8180 +# See what was removed at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/Cargo.toml#L38-L43 +# Restore support when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 +internal-miner = [] + # Experimental elasticsearch support elasticsearch = [] @@ -54,19 +65,24 @@ bench = ["zebra-test"] # Cryptography bitvec = "1.0.1" -bitflags = "2.4.1" +bitflags = "2.4.2" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.2" blake2s_simd = "1.0.2" bridgetree = "0.4.0" bs58 = { version = "0.5.0", features = ["check"] } byteorder = "1.5.0" + +# TODO: Internal miner feature functionality was removed at https://github.com/ZcashFoundation/zebra/issues/8180 +# See what was removed at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/Cargo.toml#L73-L85 +# Restore support when conditions are met. 
https://github.com/ZcashFoundation/zebra/issues/8183 equihash = "0.2.0" + group = "0.13.0" incrementalmerkletree = "0.5.0" jubjub = "0.10.0" lazy_static = "1.4.0" -num-integer = "0.1.45" +num-integer = "0.1.46" primitive-types = "0.11.1" rand_core = "0.6.4" ripemd = "0.1.3" @@ -74,7 +90,7 @@ ripemd = "0.1.3" secp256k1 = { version = "0.26.0", features = ["serde"] } sha2 = { version = "0.10.7", features = ["compress"] } uint = "0.9.5" -x25519-dalek = { version = "2.0.0-rc.3", features = ["serde"] } +x25519-dalek = { version = "2.0.1", features = ["serde"] } # ECC deps halo2 = { package = "halo2_proofs", version = "0.3.0" } @@ -85,25 +101,25 @@ zcash_note_encryption = "0.4.0" zcash_primitives = { version = "0.13.0-rc.1", features = ["transparent-inputs"] } # Time -chrono = { version = "0.4.31", default-features = false, features = ["clock", "std", "serde"] } +chrono = { version = "0.4.34", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting displaydoc = "0.2.4" static_assertions = "1.1.0" -thiserror = "1.0.48" +thiserror = "1.0.57" tracing = "0.1.39" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.188", features = ["serde_derive", "rc"] } -serde_with = "3.4.0" +serde = { version = "1.0.196", features = ["serde_derive", "rc"] } +serde_with = "3.6.1" serde-big-array = "0.5.1" # Processing -futures = "0.3.28" -itertools = "0.11.0" -rayon = "1.7.0" +futures = "0.3.30" +itertools = "0.12.1" +rayon = "1.8.1" # ZF deps ed25519-zebra = "4.0.3" @@ -111,22 +127,25 @@ redjubjub = "0.7.0" reddsa = "0.5.1" # Production feature json-conversion -serde_json = { version = "1.0.107", optional = true } +serde_json = { version = "1.0.113", optional = true } # Production feature async-error and testing feature proptest-impl -tokio = { version = "1.33.0", optional = true } +tokio = { version = "1.36.0", optional = true } + +# Production feature getblocktemplate-rpcs +zcash_address = { version = "0.3.1", optional = true } -# Experimental feature getblocktemplate-rpcs -zcash_address = { version = "0.3.0", optional = true } +# Experimental feature shielded-scan +zcash_client_backend = { version = "0.10.0-rc.1", optional = true } # Optional testing dependencies -proptest = { version = "1.3.1", optional = true } +proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.4.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.30", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.35", optional = true } [dev-dependencies] # Benchmarks @@ -141,15 +160,15 @@ spandoc = "0.2.2" tracing = "0.1.39" # Make the optional testing dependencies required -proptest = "1.3.1" +proptest = "1.4.0" proptest-derive = "0.4.0" rand = "0.8.5" rand_chacha = "0.3.1" -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } -zebra-test = { path = "../zebra-test/" } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.35" } [[bench]] name = "block" diff --git a/zebra-chain/src/amount.rs b/zebra-chain/src/amount.rs index b07a59642bd..ae7f5726358 100644 --- a/zebra-chain/src/amount.rs +++ b/zebra-chain/src/amount.rs @@ -491,7 +491,7 @@ impl Error { /// -MAX_MONEY..=MAX_MONEY, /// ); /// ``` -#[derive(Clone, Copy, Debug, Eq, PartialEq)] 
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)] pub struct NegativeAllowed; impl Constraint for NegativeAllowed { diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs index 8b1e17ba35f..cee913dd5a4 100644 --- a/zebra-chain/src/block.rs +++ b/zebra-chain/src/block.rs @@ -78,8 +78,8 @@ impl Block { /// Verified blocks have a valid height. pub fn coinbase_height(&self) -> Option<Height> { self.transactions - .get(0) - .and_then(|tx| tx.inputs().get(0)) + .first() + .and_then(|tx| tx.inputs().first()) .and_then(|input| match input { transparent::Input::Coinbase { ref height, .. } => Some(*height), _ => None, diff --git a/zebra-chain/src/block/arbitrary.rs b/zebra-chain/src/block/arbitrary.rs index dc06d4ff43e..4db23bfd26d 100644 --- a/zebra-chain/src/block/arbitrary.rs +++ b/zebra-chain/src/block/arbitrary.rs @@ -665,7 +665,7 @@ where attempts += 1; // Avoid O(n^2) algorithmic complexity by giving up early, - // rather than exhausively checking the entire UTXO set + // rather than exhaustively checking the entire UTXO set if attempts > 100 { return None; } diff --git a/zebra-chain/src/block/hash.rs b/zebra-chain/src/block/hash.rs index bf2922054fa..45b96194c74 100644 --- a/zebra-chain/src/block/hash.rs +++ b/zebra-chain/src/block/hash.rs @@ -21,7 +21,7 @@ use proptest_derive::Arbitrary; /// Note: Zebra displays transaction and block hashes in big-endian byte-order, /// following the u256 convention set by Bitcoin and zcashd. #[derive(Copy, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Default))] pub struct Hash(pub [u8; 32]); impl Hash { @@ -85,10 +85,9 @@ impl FromHex for Hash { type Error = <[u8; 32] as FromHex>::Error; fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> { - let mut hash = <[u8; 32]>::from_hex(hex)?; - hash.reverse(); + let hash = <[u8; 32]>::from_hex(hex)?; - Ok(hash.into()) + Ok(Self::from_bytes_in_display_order(&hash)) } } @@ -148,12 +147,6 @@ impl ZcashDeserialize for Hash { impl std::str::FromStr for Hash { type Err = SerializationError; fn from_str(s: &str) -> Result<Self, Self::Err> { - let mut bytes = [0; 32]; - if hex::decode_to_slice(s, &mut bytes[..]).is_err() { - Err(SerializationError::Parse("hex decoding error")) - } else { - bytes.reverse(); - Ok(Hash(bytes)) - } + Ok(Self::from_hex(s)?) } } diff --git a/zebra-chain/src/block/header.rs b/zebra-chain/src/block/header.rs index c59d3972ff5..1bbec3b471c 100644 --- a/zebra-chain/src/block/header.rs +++ b/zebra-chain/src/block/header.rs @@ -123,6 +123,11 @@ impl Header { ))? } } + + /// Compute the hash of this header. + pub fn hash(&self) -> Hash { + Hash::from(self) + } } /// A header with a count of the number of transactions in its block. diff --git a/zebra-chain/src/block/height.rs b/zebra-chain/src/block/height.rs index c5fb22efde1..71f664c75a9 100644 --- a/zebra-chain/src/block/height.rs +++ b/zebra-chain/src/block/height.rs @@ -2,6 +2,7 @@ use std::ops::{Add, Sub}; use thiserror::Error; +use zcash_primitives::consensus::BlockHeight; use crate::{serialization::SerializationError, BoxError}; @@ -23,6 +24,7 @@ pub mod json_conversion; /// There are multiple formats for serializing a height, so we don't implement /// `ZcashSerialize` or `ZcashDeserialize` for `Height`.
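The `FromHex` change above routes hex parsing through `from_bytes_in_display_order`, so parsing and formatting agree on the big-endian display convention. A small round-trip sketch (assuming, as the type's doc comment says, that `Hash` also displays in big-endian hex):

#[test]
fn block_hash_hex_round_trip() {
    use std::str::FromStr;
    use zebra_chain::block;

    // Big-endian (display-order) hex, as shown by explorers and zcashd.
    let hex = "00000000000000000000000000000000000000000000000000000000000000aa";

    // `FromStr` now delegates to `FromHex`, which reverses into internal byte order,
    // so re-formatting the hash reproduces the display-order string.
    let hash = block::Hash::from_str(hex).expect("valid hex");
    assert_eq!(hash.to_string(), hex);
}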
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))] pub struct Height(pub u32); #[derive(Error, Debug)] @@ -104,6 +106,21 @@ impl Height { } } +impl From<Height> for BlockHeight { + fn from(height: Height) -> Self { + BlockHeight::from_u32(height.0) + } +} + +impl TryFrom<BlockHeight> for Height { + type Error = &'static str; + + /// Checks that the `height` is within the valid [`Height`] range. + fn try_from(height: BlockHeight) -> Result<Self, Self::Error> { + Self::try_from(u32::from(height)) + } +} + /// A difference between two [`Height`]s, possibly negative. /// /// This can represent the difference between any height values, diff --git a/zebra-chain/src/block/merkle.rs b/zebra-chain/src/block/merkle.rs index 42762bbe6ca..639324f9d82 100644 --- a/zebra-chain/src/block/merkle.rs +++ b/zebra-chain/src/block/merkle.rs @@ -70,7 +70,7 @@ use proptest_derive::Arbitrary; /// /// [ZIP-244]: https://zips.z.cash/zip-0244 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Default))] pub struct Root(pub [u8; 32]); impl fmt::Debug for Root { diff --git a/zebra-chain/src/block/tests/vectors.rs b/zebra-chain/src/block/tests/vectors.rs index 257614becf1..1cbf17f8cee 100644 --- a/zebra-chain/src/block/tests/vectors.rs +++ b/zebra-chain/src/block/tests/vectors.rs @@ -385,7 +385,7 @@ fn node_time_check( fn time_check_now() { let _init_guard = zebra_test::init(); - // These checks are deteministic, because all the times are offset + // These checks are deterministic, because all the times are offset // from the current time. let now = Utc::now(); let three_hours_in_the_past = now - Duration::hours(3); diff --git a/zebra-chain/src/diagnostic/task.rs b/zebra-chain/src/diagnostic/task.rs index 2d43f695537..bbdb72fecfa 100644 --- a/zebra-chain/src/diagnostic/task.rs +++ b/zebra-chain/src/diagnostic/task.rs @@ -9,7 +9,7 @@ pub mod future; pub mod thread; /// A trait that checks a task's return value for panics. -pub trait CheckForPanics { +pub trait CheckForPanics: Sized { /// The output type, after removing panics from `Self`. type Output; /// Check if `self` contains a panic payload or an unexpected termination, then panic. /// Otherwise, return the non-panic part of `self`. /// /// # Panics /// /// If `self` contains a panic payload or an unexpected termination. #[track_caller] - fn check_for_panics(self) -> Self::Output; + fn panic_if_task_has_finished(self) -> Self::Output { + self.check_for_panics_with(true) + } + + /// Check if `self` contains a panic payload, then panic. + /// Otherwise, return the non-panic part of `self`. + /// + /// # Panics + /// + /// If `self` contains a panic payload. + #[track_caller] + fn panic_if_task_has_panicked(self) -> Self::Output { + self.check_for_panics_with(false) + } + + /// Check if `self` contains a panic payload, then panic. Also panics if + /// `panic_on_unexpected_termination` is true, and `self` is an unexpected termination. + /// Otherwise, return the non-panic part of `self`. + /// + /// # Panics + /// + /// If `self` contains a panic payload, or if we're panicking on unexpected terminations. + #[track_caller] + fn check_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output; } /// A trait that waits for a task to finish, then handles panics and cancellations. -pub trait WaitForPanics { +pub trait WaitForPanics: Sized { /// The underlying task output, after removing panics and unwrapping termination results.
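The pair of conversions above lets Zebra heights flow into `zcash_primitives` APIs and back, with the return direction fallible because `BlockHeight` covers the full `u32` range while Zebra's `Height` has a lower maximum. A usage sketch:

#[test]
fn height_conversion_round_trip() {
    use zcash_primitives::consensus::BlockHeight;
    use zebra_chain::block::Height;

    // Infallible: every valid Zebra height fits in a BlockHeight.
    let block_height: BlockHeight = Height(419_200).into();
    assert_eq!(u32::from(block_height), 419_200);

    // Fallible: a BlockHeight above Height::MAX is rejected.
    assert!(Height::try_from(BlockHeight::from_u32(u32::MAX)).is_err());
}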
type Output; @@ -43,5 +66,44 @@ /// /// If `self` contains an expected termination, and we're shutting down anyway. #[track_caller] - fn wait_for_panics(self) -> Self::Output; + fn wait_and_panic_on_unexpected_termination(self) -> Self::Output { + self.wait_for_panics_with(true) + } + + /// Waits for `self` to finish, then checks if its output is: + /// - a panic payload: resume that panic, + /// - a task termination: hang waiting for shutdown. + /// + /// Otherwise, returns the task return value of `self`. + /// + /// # Panics + /// + /// If `self` contains a panic payload. + /// + /// # Hangs + /// + /// If `self` contains a task termination. + #[track_caller] + fn wait_for_panics(self) -> Self::Output { + self.wait_for_panics_with(false) + } + + /// Waits for `self` to finish, then checks if its output is: + /// - a panic payload: resume that panic, + /// - an unexpected termination: + /// - if `panic_on_unexpected_termination` is true, panic with that error, + /// - otherwise, hang waiting for shutdown, + /// - an expected termination: hang waiting for shutdown. + /// + /// Otherwise, returns the task return value of `self`. + /// + /// # Panics + /// + /// If `self` contains a panic payload, or if we're panicking on unexpected terminations. + /// + /// # Hangs + /// + /// If `self` contains an expected or ignored termination, and we're shutting down anyway. + #[track_caller] + fn wait_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output; } diff --git a/zebra-chain/src/diagnostic/task/future.rs b/zebra-chain/src/diagnostic/task/future.rs index 431b13ed94f..a119d7c6234 100644 --- a/zebra-chain/src/diagnostic/task/future.rs +++ b/zebra-chain/src/diagnostic/task/future.rs @@ -18,15 +18,18 @@ impl<T> CheckForPanics for Result<T, JoinError> { type Output = Result<T, JoinError>; /// Returns the task result if the task finished normally. - /// Otherwise, resumes any panics, logs unexpected errors, and ignores any expected errors. + /// Otherwise, resumes any panics, and ignores any expected errors. + /// Handles unexpected errors based on `panic_on_unexpected_termination`. /// /// If the task finished normally, returns `Some(T)`. /// If the task was cancelled, returns `None`. #[track_caller] - fn check_for_panics(self) -> Self::Output { + fn check_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { match self { Ok(task_output) => Ok(task_output), - Err(join_error) => Err(join_error.check_for_panics()), + Err(join_error) => { + Err(join_error.check_for_panics_with(panic_on_unexpected_termination)) + } } } } @@ -38,22 +41,26 @@ impl CheckForPanics for JoinError { /// Resume any panics and panic on unexpected task cancellations. /// Always returns [`JoinError::Cancelled`](JoinError::is_cancelled). #[track_caller] - fn check_for_panics(self) -> Self::Output { + fn check_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { match self.try_into_panic() { Ok(panic_payload) => panic::resume_unwind(panic_payload), // We could ignore this error, but then we'd have to change the return type.
- Err(task_cancelled) if is_shutting_down() => { - debug!( - ?task_cancelled, - "ignoring cancelled task because Zebra is shutting down" - ); + Err(task_cancelled) => { + if !panic_on_unexpected_termination { + debug!(?task_cancelled, "ignoring expected task termination"); - task_cancelled - } + task_cancelled + } else if is_shutting_down() { + debug!( + ?task_cancelled, + "ignoring task termination because Zebra is shutting down" + ); - Err(task_cancelled) => { - panic!("task cancelled during normal Zebra operation: {task_cancelled:?}"); + task_cancelled + } else { + panic!("task unexpectedly exited with: {:?}", task_cancelled) + } } } } @@ -67,7 +74,9 @@ where /// Returns a future which waits for `self` to finish, then checks if its output is: /// - a panic payload: resume that panic, - /// - an unexpected termination: panic with that error, + /// - an unexpected termination: + /// - if `panic_on_unexpected_termination` is true, panic with that error, + /// - otherwise, hang waiting for shutdown, /// - an expected termination: hang waiting for shutdown. /// /// Otherwise, returns the task return value of `self`. @@ -79,11 +88,15 @@ /// # Hangs /// /// If `self` contains an expected termination, and we're shutting down anyway. + /// If we're ignoring terminations because `panic_on_unexpected_termination` is `false`. /// Futures hang by returning `Pending` and not setting a waker, so this uses minimal resources. #[track_caller] - fn wait_for_panics(self) -> Self::Output { + fn wait_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { async move { - match self.await.check_for_panics() { + match self + .await + .check_for_panics_with(panic_on_unexpected_termination) + { Ok(task_output) => task_output, Err(_expected_cancel_error) => future::pending().await, } diff --git a/zebra-chain/src/diagnostic/task/thread.rs b/zebra-chain/src/diagnostic/task/thread.rs index 84df3fac4aa..517aef01411 100644 --- a/zebra-chain/src/diagnostic/task/thread.rs +++ b/zebra-chain/src/diagnostic/task/thread.rs @@ -8,19 +8,44 @@ use std::{ thread::{self, JoinHandle}, }; +use crate::shutdown::is_shutting_down; + use super::{CheckForPanics, WaitForPanics}; -impl<T> CheckForPanics for thread::Result<T> { +impl<T> CheckForPanics for thread::Result<T> +where + T: std::fmt::Debug, +{ type Output = T; - /// Panics if the thread panicked. + /// # Panics + /// + /// - if the thread panicked. + /// - if the thread is cancelled, `panic_on_unexpected_termination` is true, and + /// Zebra is not shutting down. /// /// Threads can't be cancelled except by using a panic, so there are no thread errors here. + /// `panic_on_unexpected_termination` is used to decide whether a normal thread exit should panic. #[track_caller] - fn check_for_panics(self) -> Self::Output { + fn check_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { match self { // The value returned by the thread when it finished. - Ok(thread_output) => thread_output, + Ok(thread_output) => { + if !panic_on_unexpected_termination { + debug!(?thread_output, "ignoring expected thread exit"); + + thread_output + } else if is_shutting_down() { + debug!( + ?thread_output, + "ignoring thread exit because Zebra is shutting down" + ); + + thread_output + } else { + panic!("thread unexpectedly exited with: {:?}", thread_output) + } } // A thread error is always a panic.
Err(panic_payload) => panic::resume_unwind(panic_payload), @@ -28,17 +53,24 @@ } } -impl<T> WaitForPanics for JoinHandle<T> { +impl<T> WaitForPanics for JoinHandle<T> +where + T: std::fmt::Debug, +{ type Output = T; /// Waits for the thread to finish, then panics if the thread panicked. #[track_caller] - fn wait_for_panics(self) -> Self::Output { - self.join().check_for_panics() + fn wait_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { + self.join() + .check_for_panics_with(panic_on_unexpected_termination) } } -impl<T> WaitForPanics for Arc<JoinHandle<T>> { +impl<T> WaitForPanics for Arc<JoinHandle<T>> +where + T: std::fmt::Debug, +{ type Output = Option<T>; /// If this is the final `Arc`, waits for the thread to finish, then panics if the thread @@ -46,7 +78,7 @@ impl<T> WaitForPanics for Arc<JoinHandle<T>> { /// /// If this is not the final `Arc`, drops the handle and immediately returns `None`. #[track_caller] - fn wait_for_panics(self) -> Self::Output { + fn wait_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { // If we are the last Arc with a reference to this handle, // we can wait for it and propagate any panics. // // This is more readable as an expanded statement. #[allow(clippy::manual_map)] if let Some(handle) = Arc::into_inner(self) { - Some(handle.wait_for_panics()) + Some(handle.wait_for_panics_with(panic_on_unexpected_termination)) } else { None } } } -impl<T> CheckForPanics for &mut Option<Arc<JoinHandle<T>>> { +impl<T> CheckForPanics for &mut Option<Arc<JoinHandle<T>>> +where + T: std::fmt::Debug, +{ type Output = Option<T>; /// If this is the final `Arc`, checks if the thread has finished, then panics if the thread @@ -71,14 +106,14 @@ impl<T> CheckForPanics for &mut Option<Arc<JoinHandle<T>>> { /// /// If the thread has not finished, or this is not the final `Arc`, returns `None`. #[track_caller] - fn check_for_panics(self) -> Self::Output { + fn check_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { let handle = self.take()?; if handle.is_finished() { // This is the same as calling `self.wait_for_panics()`, but we can't do that, // because we've taken `self`. #[allow(clippy::manual_map)] - return handle.wait_for_panics(); + return handle.wait_for_panics_with(panic_on_unexpected_termination); } *self = Some(handle); @@ -87,7 +122,10 @@ } } -impl<T> WaitForPanics for &mut Option<Arc<JoinHandle<T>>> { +impl<T> WaitForPanics for &mut Option<Arc<JoinHandle<T>>> +where + T: std::fmt::Debug, +{ type Output = Option<T>; /// If this is the final `Arc`, waits for the thread to finish, then panics if the thread @@ -95,10 +133,13 @@ /// /// If this is not the final `Arc`, drops the handle and returns `None`. #[track_caller] - fn wait_for_panics(self) -> Self::Output { + fn wait_for_panics_with(self, panic_on_unexpected_termination: bool) -> Self::Output { // This is more readable as an expanded statement. #[allow(clippy::manual_map)] - if let Some(output) = self.take()?.wait_for_panics() { + if let Some(output) = self + .take()? + .wait_for_panics_with(panic_on_unexpected_termination) + { Some(output) } else { // Some other task has a reference, so we should give up ours to let them use it. diff --git a/zebra-chain/src/fmt.rs b/zebra-chain/src/fmt.rs index 98923446c99..708e6302121 100644 --- a/zebra-chain/src/fmt.rs +++ b/zebra-chain/src/fmt.rs @@ -163,7 +163,7 @@ where /// Wrapper to override `Debug`, redirecting it to hex-encode the type. /// The type must implement `AsRef<[u8]>`.
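The `Arc<JoinHandle<T>>` impls above hinge on `Arc::into_inner`: only the last owner gets the handle back and actually joins the thread; every other owner just drops its reference. The same ownership pattern in plain `std`, independent of these traits:

#[test]
fn last_arc_owner_joins() {
    use std::{sync::Arc, thread};

    let handle = Arc::new(thread::spawn(|| 42));
    let other_owner = Arc::clone(&handle);

    // Not the last owner: `into_inner` returns None and just drops this reference.
    assert!(Arc::into_inner(other_owner).is_none());

    // Last owner: we get the JoinHandle back and can wait for the thread.
    let output = Arc::into_inner(handle).expect("last Arc").join();
    assert_eq!(output.expect("thread should not panic"), 42);
}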
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] #[serde(transparent)] pub struct HexDebug<T: AsRef<[u8]>>(pub T); diff --git a/zebra-chain/src/history_tree.rs b/zebra-chain/src/history_tree.rs index c11a9bfcc12..696369587c0 100644 --- a/zebra-chain/src/history_tree.rs +++ b/zebra-chain/src/history_tree.rs @@ -501,6 +501,12 @@ impl From<NonEmptyHistoryTree> for HistoryTree { } } +impl From<Option<NonEmptyHistoryTree>> for HistoryTree { + fn from(tree: Option<NonEmptyHistoryTree>) -> Self { + HistoryTree(tree) + } +} + impl Deref for HistoryTree { type Target = Option<NonEmptyHistoryTree>; fn deref(&self) -> &Self::Target { diff --git a/zebra-chain/src/lib.rs b/zebra-chain/src/lib.rs index 212accf3e71..2182e1ba175 100644 --- a/zebra-chain/src/lib.rs +++ b/zebra-chain/src/lib.rs @@ -5,19 +5,9 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_chain")] +#![doc(html_root_url = "https://docs.rs/zebra_chain")] // Required by bitvec! macro #![recursion_limit = "256"] // -// Rust 1.72 has a false positive when nested generics are used inside Arc. // This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code. // -// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release: // -#![cfg_attr( - any(test, feature = "proptest-impl"), - allow(clippy::arc_with_non_send_sync) -)] #[macro_use] extern crate bitflags; diff --git a/zebra-chain/src/orchard/keys.rs b/zebra-chain/src/orchard/keys.rs index 3fd9e0ce67a..1e01ec0b185 100644 --- a/zebra-chain/src/orchard/keys.rs +++ b/zebra-chain/src/orchard/keys.rs @@ -87,17 +87,12 @@ impl PartialEq<[u8; 11]> for Diversifier { } } -impl TryFrom<Diversifier> for pallas::Affine { - type Error = &'static str; - +impl From<Diversifier> for pallas::Affine { /// Get a diversified base point from a diversifier value in affine /// representation. - fn try_from(d: Diversifier) -> Result<Self, Self::Error> { - if let Ok(projective_point) = pallas::Point::try_from(d) { - Ok(projective_point.into()) - } else { - Err("Invalid Diversifier -> pallas::Affine") - } + fn from(d: Diversifier) -> Self { + let projective_point = pallas::Point::from(d); + projective_point.into() } } diff --git a/zebra-chain/src/orchard/note.rs b/zebra-chain/src/orchard/note.rs index 8d46220ff86..822584912f3 100644 --- a/zebra-chain/src/orchard/note.rs +++ b/zebra-chain/src/orchard/note.rs @@ -20,8 +20,13 @@ pub use nullifiers::Nullifier; #[cfg(any(test, feature = "proptest-impl"))] mod arbitrary; -#[derive(Clone, Copy, Debug)] /// A random seed (rseed) used in the Orchard note creation. +#[derive(Clone, Copy, Debug)] +// At the moment this field is never read. +// +// TODO: consider replacing this code with the equivalent `orchard` crate code, +// which is better tested.
+#[allow(dead_code)] pub struct SeedRandomness(pub(crate) [u8; 32]); impl SeedRandomness { diff --git a/zebra-chain/src/orchard/sinsemilla.rs b/zebra-chain/src/orchard/sinsemilla.rs index d7d05813a5c..6fa8bad751d 100644 --- a/zebra-chain/src/orchard/sinsemilla.rs +++ b/zebra-chain/src/orchard/sinsemilla.rs @@ -174,7 +174,7 @@ mod tests { #[test] #[allow(non_snake_case)] - fn single_test_vector() { + fn sinsemilla_single_test_vector() { use halo2::pasta::group::Curve; let D = b"z.cash:test-Sinsemilla"; @@ -206,7 +206,7 @@ mod tests { // - Random message bits. #[test] #[allow(non_snake_case)] - fn hackworks_test_vectors() { + fn sinsemilla_hackworks_test_vectors() { use halo2::pasta::group::{ff::PrimeField, GroupEncoding}; for tv in tests::vectors::SINSEMILLA.iter() { @@ -231,7 +231,7 @@ mod tests { // - Random message contents. #[test] #[allow(non_snake_case)] - fn hackworks_group_hash_test_vectors() { + fn sinsemilla_hackworks_group_hash_test_vectors() { use halo2::pasta::group::GroupEncoding; for tv in tests::vectors::GROUP_HASHES.iter() { diff --git a/zebra-chain/src/orchard/tests/vectors.rs b/zebra-chain/src/orchard/tests/vectors.rs index 8d0576ed9b7..6bb3b24c502 100644 --- a/zebra-chain/src/orchard/tests/vectors.rs +++ b/zebra-chain/src/orchard/tests/vectors.rs @@ -4,6 +4,6 @@ mod sinsemilla; mod tree; pub use group_hash::GROUP_HASHES; -pub use key_components::KEY_COMPONENTS; + pub use sinsemilla::SINSEMILLA; pub use tree::{COMMITMENTS, EMPTY_ROOTS, ROOTS}; diff --git a/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs b/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs index 12e2eed4949..73d67b77570 100644 --- a/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs +++ b/zebra-chain/src/orchard/tests/vectors/sinsemilla.rs @@ -7,7 +7,7 @@ pub struct TestVector { pub(crate) hash: [u8; 32], } -// From https://github.com/zcash-hackworks/zcash-test-vectors/blob/master/orchard_sinsemilla.py +// From https://github.com/zcash/zcash-test-vectors/blob/master/test-vectors/rust/orchard_sinsemilla.rs lazy_static! { pub static ref SINSEMILLA: [TestVector; 11] = [ TestVector { diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index 3b5cb768107..9da1b00ab35 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -325,6 +325,10 @@ pub enum NoteCommitmentTreeError { } /// Orchard Incremental Note Commitment Tree +/// +/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this value differs +/// from the default value of the root of the default tree which is the hash of the root's child +/// nodes. The default tree is the empty tree which has all leaves empty. #[derive(Debug, Serialize, Deserialize)] #[serde(into = "LegacyNoteCommitmentTree")] #[serde(from = "LegacyNoteCommitmentTree")] diff --git a/zebra-chain/src/parallel/tree.rs b/zebra-chain/src/parallel/tree.rs index 88eb6a3b94d..4f35dd44617 100644 --- a/zebra-chain/src/parallel/tree.rs +++ b/zebra-chain/src/parallel/tree.rs @@ -11,7 +11,9 @@ use crate::{ }; /// An argument wrapper struct for note commitment trees. -#[derive(Clone, Debug)] +/// +/// The default instance represents the trees and subtrees that correspond to the genesis block. +#[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct NoteCommitmentTrees { /// The sprout note commitment tree. 
pub sprout: Arc<sprout::tree::NoteCommitmentTree>, diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index c90ce860798..15a6d392476 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -85,6 +85,12 @@ impl fmt::Display for Network { } impl Network { + /// Returns an iterator over [`Network`] variants. + pub fn iter() -> impl Iterator<Item = Network> { + // TODO: Use default values of `Testnet` variant when adding fields for #7845. + [Self::Mainnet, Self::Testnet].into_iter() + } + /// Get the default port associated to this network. pub fn default_port(&self) -> u16 { match self { @@ -129,6 +135,13 @@ pub fn is_a_test_network(&self) -> bool { *self != Network::Mainnet } + + /// Returns the Sapling activation height for this network. + pub fn sapling_activation_height(self) -> Height { + super::NetworkUpgrade::Sapling + .activation_height(self) + .expect("Sapling activation height needs to be set") + } } impl FromStr for Network { diff --git a/zebra-chain/src/primitives.rs b/zebra-chain/src/primitives.rs index 9b5056bc620..f5b14f8cece 100644 --- a/zebra-chain/src/primitives.rs +++ b/zebra-chain/src/primitives.rs @@ -12,6 +12,11 @@ mod address; #[cfg(feature = "getblocktemplate-rpcs")] pub use address::Address; +#[cfg(feature = "shielded-scan")] +pub mod viewing_key; + +pub mod byte_array; + pub use ed25519_zebra as ed25519; pub use reddsa; pub use redjubjub; diff --git a/zebra-chain/src/primitives/byte_array.rs b/zebra-chain/src/primitives/byte_array.rs new file mode 100644 index 00000000000..7484864e8d0 --- /dev/null +++ b/zebra-chain/src/primitives/byte_array.rs @@ -0,0 +1,14 @@ +//! Functions for modifying byte arrays. + +/// Increments `byte_array` by 1, interpreting it as a big-endian integer. +/// If the big-endian integer overflowed, sets all the bytes to zero, and returns `true`. +pub fn increment_big_endian(byte_array: &mut [u8]) -> bool { + // Increment the last byte in the array that is less than u8::MAX, and clear any bytes after it + // to increment the next value in big-endian (lexicographic) order. + let is_wrapped_overflow = byte_array.iter_mut().rev().all(|v| { + *v = v.wrapping_add(1); + v == &0 + }); + + is_wrapped_overflow +} diff --git a/zebra-chain/src/primitives/viewing_key.rs b/zebra-chain/src/primitives/viewing_key.rs new file mode 100644 index 00000000000..3a34d534dc6 --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key.rs @@ -0,0 +1,43 @@ +//! Type definitions for viewing keys and their hashes. + +use crate::parameters::Network; + +mod orchard; +mod sapling; + +use orchard::OrchardViewingKey; +use sapling::SaplingViewingKey; + +#[cfg(test)] +mod tests; + +/// A Zcash Sapling or Orchard viewing key +#[derive(Debug, Clone)] +pub enum ViewingKey { + /// A viewing key for Sapling + Sapling(SaplingViewingKey), + + /// A viewing key for Orchard + Orchard(OrchardViewingKey), +} + +impl ViewingKey { + /// Accepts an encoded Sapling viewing key to decode + /// + /// Returns a [`ViewingKey`] if successful, or None otherwise + fn parse_sapling(sapling_key: &str, network: Network) -> Option<Self> { + SaplingViewingKey::parse(sapling_key, network).map(Self::Sapling) + } + + /// Accepts an encoded Orchard viewing key to decode + /// + /// Returns a [`ViewingKey`] if successful, or None otherwise + fn parse_orchard(sapling_key: &str, network: Network) -> Option<Self> { + OrchardViewingKey::parse(sapling_key, network).map(Self::Orchard) + } + + /// Parses an encoded viewing key and returns it as a [`ViewingKey`] type.
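`increment_big_endian` above treats the slice as a single big-endian integer: the carry propagates from the last byte towards the first, and an all-`0xff` array wraps to zero and reports overflow. A quick check of both behaviours:

#[test]
fn increment_big_endian_carries_and_wraps() {
    use zebra_chain::primitives::byte_array::increment_big_endian;

    // The carry propagates through trailing 0xff bytes.
    let mut bytes = [0x00, 0xff, 0xff];
    assert!(!increment_big_endian(&mut bytes));
    assert_eq!(bytes, [0x01, 0x00, 0x00]);

    // Incrementing the maximum value wraps to zero and returns true.
    let mut max = [0xff; 3];
    assert!(increment_big_endian(&mut max));
    assert_eq!(max, [0x00; 3]);
}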
+ pub fn parse(key: &str, network: Network) -> Option<Self> { + Self::parse_sapling(key, network).or_else(|| Self::parse_orchard(key, network)) + } +} diff --git a/zebra-chain/src/primitives/viewing_key/orchard.rs b/zebra-chain/src/primitives/viewing_key/orchard.rs new file mode 100644 index 00000000000..ddb0664be2a --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key/orchard.rs @@ -0,0 +1,17 @@ +//! Defines types and implements methods for parsing Orchard viewing keys and converting them to `zebra-chain` types + +use crate::parameters::Network; + +/// A Zcash Orchard viewing key +#[derive(Debug, Clone)] +pub enum OrchardViewingKey {} + +impl OrchardViewingKey { + /// Accepts an encoded Orchard viewing key to decode + /// + /// Returns an [`OrchardViewingKey`] if successful, or None otherwise + pub fn parse(_key: &str, _network: Network) -> Option<Self> { + // TODO: parse Orchard viewing keys + None + } +} diff --git a/zebra-chain/src/primitives/viewing_key/sapling.rs b/zebra-chain/src/primitives/viewing_key/sapling.rs new file mode 100644 index 00000000000..0b699706c81 --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key/sapling.rs @@ -0,0 +1,87 @@ +//! Defines types and implements methods for parsing Sapling viewing keys and converting them to `zebra-chain` types + +use zcash_client_backend::encoding::decode_extended_full_viewing_key; +use zcash_primitives::{ + constants::*, + sapling::keys::{FullViewingKey as SaplingFvk, SaplingIvk}, + zip32::DiversifiableFullViewingKey as SaplingDfvk, +}; + +use crate::parameters::Network; + +/// A Zcash Sapling viewing key +#[derive(Debug, Clone)] +pub enum SaplingViewingKey { + /// An incoming viewing key for Sapling + Ivk(Box<SaplingIvk>), + + /// A full viewing key for Sapling + Fvk(Box<SaplingFvk>), + + /// A diversifiable full viewing key for Sapling + Dfvk(Box<SaplingDfvk>), +} + +impl SaplingViewingKey { + /// Accepts an encoded Sapling extended full viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Dfvk`] if successful, or None otherwise + fn parse_extended_full_viewing_key(sapling_key: &str, network: Network) -> Option<Self> { + decode_extended_full_viewing_key(network.sapling_efvk_hrp(), sapling_key) + // this should fail often, so a debug-level log is okay + .map_err(|err| debug!(?err, "could not decode Sapling extended full viewing key")) + .ok() + .map(|efvk| Box::new(efvk.to_diversifiable_full_viewing_key())) + .map(Self::Dfvk) + } + + /// Accepts an encoded Sapling diversifiable full viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Dfvk`] if successful, or None otherwise + fn parse_diversifiable_full_viewing_key(_sapling_key: &str, _network: Network) -> Option<Self> { + // TODO: Parse Sapling diversifiable full viewing key + None + } + + /// Accepts an encoded Sapling full viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Fvk`] if successful, or None otherwise + fn parse_full_viewing_key(_sapling_key: &str, _network: Network) -> Option<Self> { + // TODO: Parse Sapling full viewing key + None + } + + /// Accepts an encoded Sapling incoming viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Ivk`] if successful, or None otherwise + fn parse_incoming_viewing_key(_sapling_key: &str, _network: Network) -> Option<Self> { + // TODO: Parse Sapling incoming viewing key + None + } + + /// Accepts an encoded Sapling viewing key to decode + /// + /// Returns a [`SaplingViewingKey`] if successful, or None otherwise + pub(super) fn parse(key: &str, network: Network) -> Option<Self> { + // TODO: Try types with prefixes first if some don't have prefixes?
+ Self::parse_extended_full_viewing_key(key, network) + .or_else(|| Self::parse_diversifiable_full_viewing_key(key, network)) + .or_else(|| Self::parse_full_viewing_key(key, network)) + .or_else(|| Self::parse_incoming_viewing_key(key, network)) + } +} + +impl Network { + /// Returns the human-readable prefix for a Zcash Sapling extended full viewing key + /// for this network. + pub fn sapling_efvk_hrp(&self) -> &'static str { + if self.is_a_test_network() { + // Assume custom testnets have the same HRP + // + // TODO: add the regtest HRP here + testnet::HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY + } else { + mainnet::HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY + } + } +} diff --git a/zebra-chain/src/primitives/viewing_key/tests.rs b/zebra-chain/src/primitives/viewing_key/tests.rs new file mode 100644 index 00000000000..0b86c143131 --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key/tests.rs @@ -0,0 +1,15 @@ +//! Tests for zebra-chain viewing key hashes + +use super::*; + +/// The extended Sapling viewing key of [ZECpages](https://zecpages.com/boardinfo) +pub const ZECPAGES_SAPLING_VIEWING_KEY: &str = "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz"; + +/// Tests that `ViewingKey::parse` successfully decodes the zecpages sapling extended full viewing key +#[test] +fn parses_sapling_efvk_correctly() { + let _init_guard = zebra_test::init(); + + ViewingKey::parse(ZECPAGES_SAPLING_VIEWING_KEY, Network::Mainnet) + .expect("should parse hard-coded viewing key successfully"); +} diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs index dcb051773af..a9fbd9f4b95 100644 --- a/zebra-chain/src/primitives/zcash_primitives.rs +++ b/zebra-chain/src/primitives/zcash_primitives.rs @@ -336,3 +336,21 @@ pub(crate) fn transparent_output_address( None => None, } } + +impl From<Network> for zcash_primitives::consensus::Network { + fn from(network: Network) -> Self { + match network { + Network::Mainnet => zcash_primitives::consensus::Network::MainNetwork, + Network::Testnet => zcash_primitives::consensus::Network::TestNetwork, + } + } +} + +impl From<zcash_primitives::consensus::Network> for Network { + fn from(network: zcash_primitives::consensus::Network) -> Self { + match network { + zcash_primitives::consensus::Network::MainNetwork => Network::Mainnet, + zcash_primitives::consensus::Network::TestNetwork => Network::Testnet, + } + } +} diff --git a/zebra-chain/src/sapling.rs b/zebra-chain/src/sapling.rs index 11bfd899d9c..d9b30ba430c 100644 --- a/zebra-chain/src/sapling.rs +++ b/zebra-chain/src/sapling.rs @@ -24,7 +24,9 @@ pub mod shielded_data; pub mod spend; pub mod tree; -pub use commitment::{CommitmentRandomness, NoteCommitment, ValueCommitment}; +pub use commitment::{ + CommitmentRandomness, NotSmallOrderValueCommitment, NoteCommitment, ValueCommitment, +}; pub use keys::Diversifier; pub use note::{EncryptedNote, Note, Nullifier, WrappedNoteKey}; pub use output::{Output, OutputInTransactionV4, OutputPrefixInTransactionV5}; diff --git a/zebra-chain/src/sapling/arbitrary.rs b/zebra-chain/src/sapling/arbitrary.rs index 1012cfb8a6e..403633a1b8e 100644 --- a/zebra-chain/src/sapling/arbitrary.rs +++ b/zebra-chain/src/sapling/arbitrary.rs @@ -84,9 +84,7 @@ impl Arbitrary for Output { .prop_map(|(enc_ciphertext, out_ciphertext, zkproof)| Self { cv:
ExtendedPoint::generator().try_into().unwrap(), cm_u: NoteCommitment(AffinePoint::identity()).extract_u(), - ephemeral_key: keys::EphemeralPublicKey( - ExtendedPoint::generator().try_into().unwrap(), - ), + ephemeral_key: keys::EphemeralPublicKey(ExtendedPoint::generator().into()), enc_ciphertext, out_ciphertext, zkproof, diff --git a/zebra-chain/src/sapling/commitment.rs b/zebra-chain/src/sapling/commitment.rs index 65c789dc6ed..1ef23081550 100644 --- a/zebra-chain/src/sapling/commitment.rs +++ b/zebra-chain/src/sapling/commitment.rs @@ -158,6 +158,7 @@ impl NoteCommitment { /// /// #[derive(Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))] pub struct ValueCommitment(#[serde(with = "serde_helpers::AffinePoint")] jubjub::AffinePoint); impl<'a> std::ops::Add<&'a ValueCommitment> for ValueCommitment { @@ -302,6 +303,7 @@ lazy_static! { /// /// #[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))] pub struct NotSmallOrderValueCommitment(ValueCommitment); impl TryFrom for NotSmallOrderValueCommitment { diff --git a/zebra-chain/src/sapling/note/ciphertexts.rs b/zebra-chain/src/sapling/note/ciphertexts.rs index 472dbfb0a44..75d25730627 100644 --- a/zebra-chain/src/sapling/note/ciphertexts.rs +++ b/zebra-chain/src/sapling/note/ciphertexts.rs @@ -12,6 +12,12 @@ use crate::serialization::{SerializationError, ZcashDeserialize, ZcashSerialize} #[derive(Deserialize, Serialize)] pub struct EncryptedNote(#[serde(with = "BigArray")] pub(crate) [u8; 580]); +impl From<[u8; 580]> for EncryptedNote { + fn from(byte_array: [u8; 580]) -> Self { + Self(byte_array) + } +} + impl fmt::Debug for EncryptedNote { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("EncryptedNote") @@ -59,6 +65,12 @@ impl ZcashDeserialize for EncryptedNote { #[derive(Deserialize, Serialize)] pub struct WrappedNoteKey(#[serde(with = "BigArray")] pub(crate) [u8; 80]); +impl From<[u8; 80]> for WrappedNoteKey { + fn from(byte_array: [u8; 80]) -> Self { + Self(byte_array) + } +} + impl fmt::Debug for WrappedNoteKey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("WrappedNoteKey") diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index bef003243c2..c1c15bb5dc0 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -305,6 +305,10 @@ pub enum NoteCommitmentTreeError { } /// Sapling Incremental Note Commitment Tree. +/// +/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this value differs +/// from the default value of the root of the default tree which is the hash of the root's child +/// nodes. The default tree is the empty tree which has all leaves empty. #[derive(Debug, Serialize, Deserialize)] #[serde(into = "LegacyNoteCommitmentTree")] #[serde(from = "LegacyNoteCommitmentTree")] diff --git a/zebra-chain/src/serialization/error.rs b/zebra-chain/src/serialization/error.rs index f0d0b0ce5fe..17566548d1a 100644 --- a/zebra-chain/src/serialization/error.rs +++ b/zebra-chain/src/serialization/error.rs @@ -2,6 +2,7 @@ use std::{array::TryFromSliceError, io, num::TryFromIntError, str::Utf8Error}; +use hex::FromHexError; use thiserror::Error; /// A serialization error. @@ -31,6 +32,10 @@ pub enum SerializationError { #[error("CompactSize too large: {0}")] TryFromIntError(#[from] TryFromIntError), + /// A string was not valid hexadecimal. 
+ #[error("string was not hex: {0}")] + FromHexError(#[from] FromHexError), + /// An error caused when validating a zatoshi `Amount` #[error("input couldn't be parsed as a zatoshi `Amount`: {source}")] Amount { diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index 5c92692832a..c5ad74a7d6e 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -83,12 +83,6 @@ lazy_static! { }; } -/// The index of a note's commitment at the leafmost layer of its Note -/// Commitment Tree. -/// -/// -pub struct Position(pub(crate) u64); - /// Sprout note commitment tree root node hash. /// /// The root hash in LEBS2OSP256(rt) encoding of the Sprout note @@ -206,6 +200,10 @@ pub enum NoteCommitmentTreeError { /// Internally this wraps [`bridgetree::Frontier`], so that we can maintain and increment /// the full tree with only the minimal amount of non-empty nodes/leaves required. /// +/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this value differs +/// from the default value of the root of the default tree (which is the empty tree) since it is the +/// pair-wise root-hash of the tree's empty leaves at the tree's root level. +/// /// [Sprout Note Commitment Tree]: https://zips.z.cash/protocol/protocol.pdf#merkletree /// [nullifier set]: https://zips.z.cash/protocol/protocol.pdf#nullifierset #[derive(Debug, Serialize, Deserialize)] diff --git a/zebra-chain/src/subtree.rs b/zebra-chain/src/subtree.rs index 52e3ef3d7d0..6402b0e077d 100644 --- a/zebra-chain/src/subtree.rs +++ b/zebra-chain/src/subtree.rs @@ -50,31 +50,38 @@ impl From for u64 { // TODO: // - consider defining sapling::SubtreeRoot and orchard::SubtreeRoot types or type wrappers, // to avoid type confusion between the leaf Node and subtree root types. -// - rename the `Node` generic to `SubtreeRoot` /// Subtree root of Sapling or Orchard note commitment tree, /// with its associated block height and subtree index. #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] -pub struct NoteCommitmentSubtree { +pub struct NoteCommitmentSubtree { /// Index of this subtree pub index: NoteCommitmentSubtreeIndex, /// Root of this subtree. - pub node: Node, + pub root: SubtreeRoot, /// End boundary of this subtree, the block height of its last leaf. - pub end: Height, + pub end_height: Height, } -impl NoteCommitmentSubtree { +impl NoteCommitmentSubtree { /// Creates new [`NoteCommitmentSubtree`] - pub fn new(index: impl Into, end: Height, node: Node) -> Self { + pub fn new( + index: impl Into, + end_height: Height, + root: SubtreeRoot, + ) -> Self { let index = index.into(); - Self { index, end, node } + Self { + index, + end_height, + root, + } } /// Converts struct to [`NoteCommitmentSubtreeData`]. - pub fn into_data(self) -> NoteCommitmentSubtreeData { - NoteCommitmentSubtreeData::new(self.end, self.node) + pub fn into_data(self) -> NoteCommitmentSubtreeData { + NoteCommitmentSubtreeData::new(self.end_height, self.root) } } @@ -82,29 +89,25 @@ impl NoteCommitmentSubtree { /// Used for database key-value serialization, where the subtree index is the key, and this struct is the value. #[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Serialize)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] -pub struct NoteCommitmentSubtreeData { +pub struct NoteCommitmentSubtreeData { /// Merkle root of the 2^16-leaf subtree. 
diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs
index 583ca9681e8..3f099c2da77 100644
--- a/zebra-chain/src/transaction.rs
+++ b/zebra-chain/src/transaction.rs
@@ -511,7 +511,7 @@ impl Transaction {
     pub fn is_coinbase(&self) -> bool {
         self.inputs().len() == 1
             && matches!(
-                self.inputs().get(0),
+                self.inputs().first(),
                 Some(transparent::Input::Coinbase { .. })
             )
     }
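The `get(0)` to `first()` swaps here (and in `zebra-consensus` below) follow the `clippy::get_first` lint; both forms return `Option<&T>`, but `first()` states the intent:

```rust
#[allow(clippy::get_first)] // kept only to demonstrate the equivalence
fn main() {
    let transactions = ["coinbase", "transparent"];
    assert_eq!(transactions.get(0), transactions.first());
}
```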
diff --git a/zebra-chain/src/work/difficulty.rs b/zebra-chain/src/work/difficulty.rs
index a15c1f13ce8..4724188060c 100644
--- a/zebra-chain/src/work/difficulty.rs
+++ b/zebra-chain/src/work/difficulty.rs
@@ -63,6 +63,7 @@ mod tests;
 ///
 /// [section 7.7.4]: https://zips.z.cash/protocol/protocol.pdf#nbits
 #[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
+#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))]
 pub struct CompactDifficulty(pub(crate) u32);
 
 /// An invalid CompactDifficulty value, for testing.
diff --git a/zebra-chain/src/work/equihash.rs b/zebra-chain/src/work/equihash.rs
index e8b73b1614a..97927358083 100644
--- a/zebra-chain/src/work/equihash.rs
+++ b/zebra-chain/src/work/equihash.rs
@@ -12,16 +12,24 @@ use crate::{
     },
 };
 
-/// The error type for Equihash
+#[cfg(feature = "internal-miner")]
+use crate::serialization::AtLeastOne;
+
+/// The error type for Equihash validation.
 #[non_exhaustive]
 #[derive(Debug, thiserror::Error)]
 #[error("invalid equihash solution for BlockHeader")]
 pub struct Error(#[from] equihash::Error);
 
+/// The error type for Equihash solving.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, thiserror::Error)]
+#[error("solver was cancelled")]
+pub struct SolverCancelled;
+
 /// The size of an Equihash solution in bytes (always 1344).
 pub(crate) const SOLUTION_SIZE: usize = 1344;
 
-/// Equihash Solution.
+/// Equihash Solution in compressed format.
 ///
 /// A wrapper around [u8; 1344] because Rust doesn't implement common
 /// traits like `Debug`, `Clone`, etc for collections like array
@@ -53,6 +61,8 @@ impl Solution {
             .zcash_serialize(&mut input)
             .expect("serialization into a vec can't fail");
 
+        // The part of the header before the nonce and solution.
+        // This data is kept constant during solver runs, so the verifier API takes it separately.
         let input = &input[0..Solution::INPUT_LENGTH];
 
         equihash::is_valid_solution(n, k, input, nonce.as_ref(), solution)?;
@@ -60,11 +70,58 @@ impl Solution {
         Ok(())
     }
 
-    #[cfg(feature = "getblocktemplate-rpcs")]
+    /// Returns a [`Solution`] containing the bytes from `solution`.
+    /// Returns an error if `solution` is the wrong length.
+    pub fn from_bytes(solution: &[u8]) -> Result<Self, SerializationError> {
+        if solution.len() != SOLUTION_SIZE {
+            return Err(SerializationError::Parse(
+                "incorrect equihash solution size",
+            ));
+        }
+
+        let mut bytes = [0; SOLUTION_SIZE];
+        // Won't panic, because we just checked the length.
+        bytes.copy_from_slice(solution);
+
+        Ok(Self(bytes))
+    }
+
+    /// Returns a [`Solution`] of `[0; SOLUTION_SIZE]` to be used in block proposals.
+    #[cfg(feature = "getblocktemplate-rpcs")]
     pub fn for_proposal() -> Self {
         Self([0; SOLUTION_SIZE])
     }
+
+    /// Mines and returns one or more [`Solution`]s based on a template `header`.
+    /// Each returned header contains a valid `nonce` and `solution`.
+    ///
+    /// If `cancel_fn()` returns an error, returns early with `Err(SolverCancelled)`.
+    ///
+    /// The `nonce` in the header template is taken as the starting nonce. If you are running multiple
+    /// solvers at the same time, start them with different nonces.
+    /// The `solution` in the header template is ignored.
+    ///
+    /// This method is CPU and memory-intensive. It uses 144 MB of RAM and one CPU core while running.
+    /// It can run for minutes or hours if the network difficulty is high.
+    #[cfg(feature = "internal-miner")]
+    #[allow(clippy::unwrap_in_result)]
+    pub fn solve<F>(
+        mut _header: Header,
+        mut _cancel_fn: F,
+    ) -> Result<AtLeastOne<Header>, SolverCancelled>
+    where
+        F: FnMut() -> Result<(), SolverCancelled>,
+    {
+        // TODO: Function code was removed as part of https://github.com/ZcashFoundation/zebra/issues/8180
+        //       Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/src/work/equihash.rs#L115-L166
+        //       Restore the code when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183
+
+        Err(SolverCancelled)
+    }
+
+    // TODO: Some methods were removed as part of https://github.com/ZcashFoundation/zebra/issues/8180
+    //       Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/src/work/equihash.rs#L171-L196
+    //       Restore the code when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183
 }
 
 impl PartialEq<Solution> for Solution {
@@ -93,6 +150,13 @@ impl Clone for Solution {
 
 impl Eq for Solution {}
 
+#[cfg(any(test, feature = "proptest-impl"))]
+impl Default for Solution {
+    fn default() -> Self {
+        Self([0; SOLUTION_SIZE])
+    }
+}
+
 impl ZcashSerialize for Solution {
     fn zcash_serialize<W: io::Write>(&self, writer: W) -> Result<(), io::Error> {
         zcash_serialize_bytes(&self.0.to_vec(), writer)
@@ -102,17 +166,6 @@ impl ZcashSerialize for Solution {
 impl ZcashDeserialize for Solution {
     fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
         let solution: Vec<u8> = (&mut reader).zcash_deserialize_into()?;
-
-        if solution.len() != SOLUTION_SIZE {
-            return Err(SerializationError::Parse(
-                "incorrect equihash solution size",
-            ));
-        }
-
-        let mut bytes = [0; SOLUTION_SIZE];
-        // Won't panic, because we just checked the length.
-        bytes.copy_from_slice(&solution);
-
-        Ok(Self(bytes))
+        Self::from_bytes(&solution)
     }
 }
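A sketch of how a caller might drive `Solution::solve` with a cooperative cancellation check, assuming the `internal-miner` feature and `AtLeastOne`'s `first` accessor; while the solver body above is stubbed out, this always returns `Err(SolverCancelled)`:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

use zebra_chain::{
    block::Header,
    work::equihash::{Solution, SolverCancelled},
};

fn mine_with_cancel(template: Header, stop: &AtomicBool) -> Result<(), SolverCancelled> {
    // `solve` polls this closure; returning an error stops the solver early.
    let solved = Solution::solve(template, || {
        if stop.load(Ordering::Relaxed) {
            Err(SolverCancelled)
        } else {
            Ok(())
        }
    })?;

    // `AtLeastOne` guarantees at least one solved header.
    let _first_solved_header = solved.first();
    Ok(())
}
```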
diff --git a/zebra-chain/src/work/tests/prop.rs b/zebra-chain/src/work/tests/prop.rs
index 6c3b4ae8ae8..f5bdf010950 100644
--- a/zebra-chain/src/work/tests/prop.rs
+++ b/zebra-chain/src/work/tests/prop.rs
@@ -18,15 +18,15 @@ fn equihash_solution_roundtrip() {
     let _init_guard = zebra_test::init();
 
     proptest!(|(solution in any::<equihash::Solution>())| {
-        let data = solution
-            .zcash_serialize_to_vec()
-            .expect("randomized EquihashSolution should serialize");
-        let solution2 = data
-            .zcash_deserialize_into()
-            .expect("randomized EquihashSolution should deserialize");
-
-        prop_assert_eq![solution, solution2];
-    });
+            let data = solution
+                .zcash_serialize_to_vec()
+                .expect("randomized EquihashSolution should serialize");
+            let solution2 = data
+                .zcash_deserialize_into()
+                .expect("randomized EquihashSolution should deserialize");
+
+            prop_assert_eq![solution, solution2];
+        });
 }
 
 prop_compose! {
@@ -94,10 +94,10 @@ fn equihash_prop_test_nonce() -> color_eyre::eyre::Result<()> {
         block.header.solution.check(&block.header)?;
 
         proptest!(|(fake_header in randomized_nonce(*block.header.as_ref()))| {
-            fake_header.solution
-                .check(&fake_header)
-                .expect_err("block header should not validate on randomized nonce");
-        });
+                fake_header.solution
+                    .check(&fake_header)
+                    .expect_err("block header should not validate on randomized nonce");
+            });
     }
 
     Ok(())
diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml
index 21ab5346343..a98c83b2c81 100644
--- a/zebra-consensus/Cargo.toml
+++ b/zebra-consensus/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-consensus"
-version = "1.0.0-beta.30"
+version = "1.0.0-beta.35"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 description = "Implementation of Zcash consensus checks"
 license = "MIT OR Apache-2.0"
@@ -24,7 +24,7 @@ progress-bar = [
     "zebra-state/progress-bar",
 ]
 
-# Experimental mining RPC support
+# Mining RPC support
 getblocktemplate-rpcs = [
     "zebra-state/getblocktemplate-rpcs",
     "zebra-node-services/getblocktemplate-rpcs",
@@ -41,19 +41,19 @@ bls12_381 = "0.8.0"
 halo2 = { package = "halo2_proofs", version = "0.3.0" }
 jubjub = "0.10.0"
 rand = "0.8.5"
-rayon = "1.7.0"
+rayon = "1.8.1"
 
-chrono = { version = "0.4.31", default-features = false, features = ["clock", "std"] }
+chrono = { version = "0.4.34", default-features = false, features = ["clock", "std"] }
 displaydoc = "0.2.4"
 lazy_static = "1.4.0"
 once_cell = "1.18.0"
-serde = { version = "1.0.188", features = ["serde_derive"] }
+serde = { version = "1.0.196", features = ["serde_derive"] }
 
-futures = "0.3.28"
+futures = "0.3.30"
 futures-util = "0.3.28"
-metrics = "0.21.1"
-thiserror = "1.0.48"
-tokio = { version = "1.33.0", features = ["time", "sync", "tracing", "rt-multi-thread"] }
+metrics = "0.22.1"
+thiserror = "1.0.57"
+tokio = { version = "1.36.0", features = ["time", "sync", "tracing", "rt-multi-thread"] }
 tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] }
 tracing = "0.1.39"
 tracing-futures = "0.2.5"
@@ -63,19 +63,19 @@ orchard = "0.6.0"
 zcash_proofs = { version = "0.13.0-rc.1", features = ["multicore" ] }
 wagyu-zcash-parameters = "0.2.0"
 
-tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.6" }
-tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.6" }
+tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.11" }
+tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.11" }
version = "1.0.0-beta.30" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.35" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.35" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # Test-only dependencies -proptest = { version = "1.3.1", optional = true } +proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.4.0", optional = true } [dev-dependencies] @@ -85,15 +85,15 @@ color-eyre = "0.6.2" tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } hex = "0.4.3" -num-integer = "0.1.45" -proptest = "1.3.1" +num-integer = "0.1.46" +proptest = "1.4.0" proptest-derive = "0.4.0" spandoc = "0.2.2" -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" -tracing-subscriber = "0.3.17" +tracing-subscriber = "0.3.18" -zebra-state = { path = "../zebra-state", features = ["proptest-impl"] } -zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.35" } diff --git a/zebra-consensus/src/block/check.rs b/zebra-consensus/src/block/check.rs index e9ba97636d4..e539743a3d8 100644 --- a/zebra-consensus/src/block/check.rs +++ b/zebra-consensus/src/block/check.rs @@ -33,7 +33,7 @@ pub fn coinbase_is_first(block: &Block) -> Result, // let first = block .transactions - .get(0) + .first() .ok_or(BlockError::NoTransactions)?; // > The first transaction in a block MUST be a coinbase transaction, // > and subsequent transactions MUST NOT be coinbase transactions. 
@@ -142,7 +142,7 @@ pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error
 /// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
 pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
     let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
-    let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
+    let coinbase = block.transactions.first().ok_or(SubsidyError::NoCoinbase)?;
 
     // Validate funding streams
     let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
@@ -211,7 +211,7 @@ pub fn miner_fees_are_valid(
     block_miner_fees: Amount<NonNegative>,
 ) -> Result<(), BlockError> {
     let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
-    let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
+    let coinbase = block.transactions.first().ok_or(SubsidyError::NoCoinbase)?;
 
     let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
         .iter()
diff --git a/zebra-consensus/src/block/subsidy/general.rs b/zebra-consensus/src/block/subsidy/general.rs
index b3cb0231734..5be97f3a990 100644
--- a/zebra-consensus/src/block/subsidy/general.rs
+++ b/zebra-consensus/src/block/subsidy/general.rs
@@ -44,8 +44,8 @@ pub fn halving_divisor(height: Height, network: Network) -> Option<u64> {
         Some(halving_div)
     } else {
         let pre_blossom_height = blossom_height - SLOW_START_SHIFT;
-        let scaled_pre_blossom_height = pre_blossom_height
-            * HeightDiff::try_from(BLOSSOM_POW_TARGET_SPACING_RATIO).expect("constant is positive");
+        let scaled_pre_blossom_height =
+            pre_blossom_height * HeightDiff::from(BLOSSOM_POW_TARGET_SPACING_RATIO);
 
         let post_blossom_height = height - blossom_height;
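`HeightDiff::from` works here because the spacing ratio now converts into `HeightDiff` infallibly, so the `expect` is no longer needed. A toy version of the scaling step (the constant and types are stand-ins, not Zebra's real definitions):

```rust
// Stand-ins: Zebra defines HeightDiff and the spacing ratio in its own crates.
type HeightDiff = i64;

// Blossom halved the target block spacing, so pre-Blossom heights scale by 2.
const BLOSSOM_POW_TARGET_SPACING_RATIO: u32 = 2;

fn scaled_pre_blossom_height(pre_blossom_height: HeightDiff) -> HeightDiff {
    // `u32` widens into `i64` without failure, so `From` replaces `try_from`.
    pre_blossom_height * HeightDiff::from(BLOSSOM_POW_TARGET_SPACING_RATIO)
}

fn main() {
    assert_eq!(scaled_pre_blossom_height(100), 200);
}
```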
.expect("queued has at least one entry"); - metrics::gauge!("checkpoint.queued.max.height", max_queued_height.0 as f64); + metrics::gauge!("checkpoint.queued.max.height").set(max_queued_height.0 as f64); let is_checkpoint = self.checkpoint_list.contains(height); tracing::debug!(?height, ?hash, ?is_checkpoint, "queued block"); @@ -326,12 +326,12 @@ where return; }; - metrics::gauge!("checkpoint.verified.height", verified_height.0 as f64); + metrics::gauge!("checkpoint.verified.height").set(verified_height.0 as f64); let checkpoint_index = self.checkpoint_list.prev_checkpoint_index(verified_height); let checkpoint_count = self.checkpoint_list.len(); - metrics::gauge!("checkpoint.verified.count", checkpoint_index as f64); + metrics::gauge!("checkpoint.verified.count").set(checkpoint_index as f64); tracing::debug!( ?verified_height, @@ -409,7 +409,7 @@ where // Check if we have the genesis block as a special case, to simplify the loop BeforeGenesis if !self.queued.contains_key(&block::Height(0)) => { tracing::trace!("Waiting for genesis block"); - metrics::counter!("checkpoint.waiting.count", 1); + metrics::counter!("checkpoint.waiting.count").increment(1); return WaitingForBlocks; } BeforeGenesis => block::Height(0), @@ -444,10 +444,7 @@ where break; } } - metrics::gauge!( - "checkpoint.queued.continuous.height", - pending_height.0 as f64, - ); + metrics::gauge!("checkpoint.queued.continuous.height").set(pending_height.0 as f64); // Now find the start of the checkpoint range let start = self.current_start_bound().expect( @@ -466,14 +463,11 @@ where ); if let Some(block::Height(target_checkpoint)) = target_checkpoint { - metrics::gauge!( - "checkpoint.processing.next.height", - target_checkpoint as f64, - ); + metrics::gauge!("checkpoint.processing.next.height").set(target_checkpoint as f64); } else { // Use the start height if there is no potential next checkpoint - metrics::gauge!("checkpoint.processing.next.height", start_height.0 as f64); - metrics::counter!("checkpoint.waiting.count", 1); + metrics::gauge!("checkpoint.processing.next.height").set(start_height.0 as f64); + metrics::counter!("checkpoint.waiting.count").increment(1); } target_checkpoint @@ -541,12 +535,12 @@ where /// Increase the current checkpoint height to `verified_height`, fn update_progress(&mut self, verified_height: block::Height) { if let Some(max_height) = self.queued.keys().next_back() { - metrics::gauge!("checkpoint.queued.max.height", max_height.0 as f64); + metrics::gauge!("checkpoint.queued.max.height").set(max_height.0 as f64); } else { // use f64::NAN as a sentinel value for "None", because 0 is a valid height - metrics::gauge!("checkpoint.queued.max.height", f64::NAN); + metrics::gauge!("checkpoint.queued.max.height").set(f64::NAN); } - metrics::gauge!("checkpoint.queued_slots", self.queued.len() as f64); + metrics::gauge!("checkpoint.queued_slots").set(self.queued.len() as f64); // Ignore blocks that are below the previous checkpoint, or otherwise // have invalid heights. @@ -869,7 +863,7 @@ where let block_count = rev_valid_blocks.len(); tracing::info!(?block_count, ?current_range, "verified checkpoint range"); - metrics::counter!("checkpoint.verified.block.count", block_count as u64); + metrics::counter!("checkpoint.verified.block.count").increment(block_count as u64); // All the blocks we've kept are valid, so let's verify them // in height order. 
@@ -1058,7 +1052,7 @@ where self.process_checkpoint_range(); - metrics::gauge!("checkpoint.queued_slots", self.queued.len() as f64); + metrics::gauge!("checkpoint.queued_slots").set(self.queued.len() as f64); // Because the checkpoint verifier duplicates state from the state // service (it tracks which checkpoints have been verified), we must diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 540ea6ce644..600b614b14d 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -11260,3 +11260,382 @@ 2257898 00000000008820c978833e535149174f965a6b3e4a6ac4b1ddc48345478ba64c 2258298 000000000100d9383f9c9ff554d473e8a276855beb6804b6b9a62f809b567a40 2258698 0000000000a91531d88040a47c0f1b8f5b16ea1833aa97affaa04d5e7fc18aed +2259098 0000000000571f1b435c00f0e25abd36cd4ceb0304dde96c00729cb6ee99ec1d +2259498 00000000008c41739e7388c7e6b18b41932c5c924bf782e67b8741823cfafeed +2259898 0000000000bea5d3b1894ebf311a3cfc0c163cd97aa115343af473284e94011e +2260298 00000000003a6810f0606f912267ed1f3f6c0dd5b60dc8f688fb374d9f3e63ea +2260698 00000000019b46b74c04a7d0b448a947a661c110453d3ccd10bd6a0897af8745 +2261098 0000000000752a51228b709071b85d357e774933071bb8243cdceb4618341902 +2261498 000000000124a04a7f7391a30b89a4cd3f5cbeea1d3611c0e93ed776b6df716f +2261898 00000000000b67012c55101ca03446b697bb89200d379bfe2bfa7c6e29dbcec1 +2262298 000000000112dbcdca20c9b51fff5eb27717b9d5b5a4869ad4c718a8abe32622 +2262698 0000000001981362704b4db27db34cdedb2cfceb1e837d63a1726d88ad562aad +2263098 0000000001598eb97807a862e88de355df1682dc502fd95b421f1a27d3fee4ac +2263498 00000000002f9f176d859355e4b9ed944737ee47d4607bd90e0624b29291288e +2263898 00000000013feb9b48f2dae70c16b2c7ba121dc0bda2863dfdfd88d0205e3981 +2264298 000000000105c9ab091ef19d4d35e87180200e1c36f7693672de4948996f4128 +2264698 000000000064fe72c0067d6b6be354000f105eb1b4fbf5b52fab7101e870f491 +2265098 000000000142cbc97efe0dcf468ac4a12b6d13fae8655d726c31e60e1bbf491a +2265498 00000000016b1ad1ee5e759d79e52f1d39d0675eb898e281e9d2c8e7a1fc4f84 +2265898 000000000066a4adb2083c41c385d1ae7672796b67371d87048e5b3e18b23151 +2266298 0000000000c2a09cdb42b13fba1a4ede82f7c322b8c922866ad1610876f63dfa +2266698 0000000000387c8d142e353c318000fbe631b5425974da15a9ce90ba51a0ba8a +2267098 0000000000dc619700db0f9ceeeae78263fac81dd1edad26e8c5459dc6a08b55 +2267498 00000000009edbdff37aea72431c2f32b12e97a68567915dbcb3d873f004345e +2267898 00000000010d25c50ac7a5cc9b4057607a984994ea67bb742681d95cd4afd861 +2268298 00000000014139ee0626e6ba476062262960a027c9cbb2593b2ddceffedf5171 +2268698 00000000013a75bcab73af462a0e483ceb5815bd1d5f299d7187201dc042cdae +2269098 00000000011d04ab3d6f5ce59b1b27659d3dcd63bc58982b8b97ab22b1c293d8 +2269498 000000000129edc8ac07fea46c87a007894b2cdee54b02fe8178febfe23f738b +2269898 0000000000a002883b350580d359757f2dbe2fbc74b808025a20d19ce1ec25c1 +2270298 00000000001b2e1c6e48417eddc7307053395198d2567bd4d89adeb76a48f9d6 +2270698 0000000001813724b4adf03fa8cb5bbc638e2045a1da562bac2ee9f8254f9b72 +2271098 00000000022d106127c61004ac48b619adc9a317e9447cce843a5ecd479fc5d4 +2271498 000000000182854011d6df42090f85ed825d658fdb308e5d0f840946fff24ab7 +2271898 0000000000af3e54dd1d70506e352ad18ab5500788411779bef4b08b5b1c84ee +2272298 0000000000a023f350d9185df1cdf2bb3c669c2f6c6a37b28d98d27c4e3cd0d7 +2272698 00000000002155c2072c3eee2881ea086387831f42336eea8d155b8628243c0a +2273098 0000000000efbdea08f1dd9b5ea2ab7d82d74bc13af8050618c0f40d4d533705 +2273498 
00000000004cc8e0721d2fca0ef626439e1f8c08e4acc45451cd92ce5d1d6c43 +2273898 00000000005cc9d4c0012ed17d3f8d1e1a89408c3d4b120b2ed5b7fa2cc390d1 +2274298 0000000001c7791fc740be4d00c58e46932a3e25e263b831428ad68a95bb367f +2274698 00000000009c079adbb3311e9a7fb078cd3dc3340c4642fd52e2aee2a45cf596 +2275098 0000000000e65beaf7965aae4c0a912d11ca8f9bacd7b29c2b85586ead491081 +2275498 0000000000abbda1bf3dab5f2211700e5df3854c402fa45d5f55856b64dd57cc +2275898 00000000003fd9806ce4571eacd5dea635bf87801f69b65f5491e571d292be0f +2276298 000000000081478c35afefe71c5514915c7b82c50dde006c716bb0b7bcf6ed1d +2276698 0000000001097d10dca940448201b610d4ca8b25696ce37194a506564de8d54c +2277098 0000000001a1c8223c08e8a526eb8b4e1dc75dce32bbbc9f45c6f0eec025fc5f +2277498 0000000001bb2ce9a9098171c023c6e6bdc5a491787a8762195ccd3244b51d9a +2277898 0000000001aa50561c65f5dbb17e1be03d4a48ad07dc3e4c2f455bb51f051a8b +2278298 00000000014ab4ef48e0a1fbfc3730a7cdbc5e4e59315ad575d1ed64bf3eaa06 +2278698 00000000003a3ec27e08e0e9dcfa322a508e2314a2b78aafc3af3c6a557a13be +2279098 0000000001321e28cd6f2cd821eb30326e8786e88ccb95b5bbbecd355863681b +2279498 0000000000b706a6fa7f0d4b5b2b8db2e72dc3e4ca7f1f5012ed8bd8032564d1 +2279898 00000000004993d35b87189ec3ac4693ad4ac9f53219bd343d1cbfbf2a731a46 +2280298 0000000000b770c2eb023ffaa339fbcb2df55254ad172294be59b0cdc675786e +2280698 000000000029832b55a1e0f90ed3ca0099dce7850fcd5f456623b86e2bcc54c1 +2281098 0000000000b00866e4926fdde1102d6ebcc14a7b07d36b57f7ef5fb61d0cb4ba +2281498 00000000000991bbf63b86a75fb8495b8b0127b58c036d412daa89bf01886bff +2281898 0000000001b12331c0d71122aa5c5df0a51354ebc2478b42e213a91d94fb5433 +2282298 0000000000b73fc46b498ca38b59c499781911d160af9289b70b9fe1cff5c6e5 +2282698 00000000015c503748f5ab9a48735a37d283a09cc92d17576d2288edba0ca128 +2283098 00000000011673e6809ee8dfd89673db8e6e58a16f2adc5c1b02fa0f00b83877 +2283498 000000000042fb905cc00922120ff12753362390fa6c7d50ac5b9f67dcf8d2a6 +2283898 000000000084ca2e3257fc6982e9b81a79cb96f305a84dd9fbed786c95dd774e +2284298 000000000069691b01f1d8b42ad95bdff1df472f2b105bcbf23922e4a97e5a81 +2284698 00000000006752f2759564957dc3299cd4ac7d995a16aef4f070b31c56a4bd25 +2285098 00000000011a59ef91766db47bc534ba875fbce6490c6d0f35a8c2d2cc00ad53 +2285498 0000000000e814abdc1e4912653ad47237d0b731d7bc6536c8e459d07cb4cc5d +2285898 00000000001ed38b3251192e4070ea2e7c9ffd61dbf677d1e0e0a0f1e1384c8d +2286298 0000000000e439f2921796034a1a28f1bbbfd2a77c18598f20bda8eff4a07dc8 +2286698 00000000005a3ecb87d8b1e2217e22ce1edc59c9ca3e434123f4ac834d937f94 +2287098 000000000065837cb08ccea8c6f245a72e7691aa595d80e785aa38a0bdbd318c +2287498 0000000000003175441ae9aa24c3f03f7685e3db11336f6313f1fe201210d62d +2287898 00000000011891aeff32f58e78e3aa81bcf0e5096452f15c1d2a4641dde82ccb +2288298 0000000000b25bd6f17598086514288ec94e47b5ac6501ae654c39a819ded4cd +2288698 00000000014874d5f5b2b6bb0814950ce61d14b97e016130a60047882c824b7f +2289098 0000000000c0e221e038934ea433c537dfd5d8314405b0af54c646c9367dcd60 +2289498 00000000015e0c20ce7fcc0d501a247b3e2788e0637306919d1bad19626efee0 +2289898 00000000001f89ed33e5b78dd35ce23f9661484d27eb3e6e76ce8375f917e4e6 +2290298 0000000000031532b65fa6b94d7a1aa5eeef8a55d11a6329134e3f8e25306bbd +2290698 00000000001cf3dcc74241bd26f8044a85627e4367427049b096d567601688f5 +2291098 000000000178a7bde945a75cf8d575dd8dc39c530daa3c4989d9ce26444332ff +2291498 00000000010e4cbdc80c7d873cc55b68aca818a8394bcfbe98e2122f6ebde723 +2291898 0000000000cb1c8a7689f22372821cfe241d6459279b8010fb54e1453ab6b956 +2292298 00000000011c8d57992a988463b7ed3ffb9621b7911985dd6110ce9539fc1a74 +2292698 
0000000001f4b6b418acce67d3d3491257888a798a1cfe700785ccf3051dbb40 +2293098 000000000153b9e983da00ac7ca829bef4b92b58a7d3f3eca31b7bd76e34af2c +2293498 0000000001abe7f0748d84ccfae224b0147cecc7b83772e590cdf3f4fa6cac69 +2293898 0000000001299cac3639c2ae930120d1644f5304e9260b541236555c797cbbca +2294298 00000000007942814fdb9768244836e7d3cb6e27e4a6e9eea620a4ebfc67c2ba +2294698 00000000011027d373ea2ac93451e9d953880c1c1bba2f5b9b53f4062d3caa4c +2295098 000000000176ddf435f02c388ada40dcaca1b739fd5724e92999df280526f85b +2295498 0000000000e52e15cc76aaf051dcd22bbe9bd3a3b68c66f9aa9ea252499d1e81 +2295898 000000000131f1392ea39738abc23babbe41ddb415fdabce8c15c8a35f9e4ca6 +2296298 0000000000b491e08545a33eb6b55fdc2481bd616351f2b48292b514f6d67666 +2296698 0000000000a52e704cea2742a2abde854d540ac955ceaaea227c5ba25dbdb71f +2297098 0000000000bc1540795fb142d202dd7b9264d7789d3733e8612851ae7cdbd0d7 +2297498 00000000002feddfff55c0533d7dcc77f7aa457c19a9b80df2b9b2d8c66d7094 +2297898 00000000016e1eb1940a4b6d2ba26b5d4f33230983b0cf4f2567a1028892a518 +2298298 0000000000b7816ea932eb97f289dc3705e5794707f155bdbda1119df040d2cd +2298698 0000000000e3606691113bd16d0af056977ffa79b6e3841e2b5b589e641efe12 +2299098 0000000000b535b66166b1b42d78085dcac66300eb81465d575bc7de3cc26747 +2299498 0000000000567a67094f6ab26944175b3543e479186674f6dd8f7e28cc627673 +2299898 000000000178d0fd509e633794d3992048bb8bf95eb3f3edda8104a4aff3d7ee +2300298 0000000000a8e816f6873c87c45441be870a60c1a8ea4e10ef6563ac437ccdbc +2300698 000000000023e576eb60514c31db1154e4f03b8553ee01334a612909dedb0b3b +2301098 000000000060c546c617c56dbc369c2433b286b24910f6a5f62f9c322d7642e7 +2301498 00000000012799a8486a04e3ffb40ae25770ea205f081e50d7f2b96ca02a23e1 +2301898 0000000000d2ff96ef9c54a739293c587f7cda7553e53e135cca8c1943249ce8 +2302298 00000000013855741f6e0b9b74927e5151ca25c29a7407c7727bb607d3158e47 +2302698 00000000016f3ed8d2d3c13a20129072d3dc5e02b55ade28f6d191f0c0aae8f1 +2303098 000000000173a40e0557a8b44a692313fd393cd9adcbe7dcb510c6b7ab6cfd73 +2303498 0000000001733853d08d78e8188fbdb7fb192dd0b6c74a08344f6f77c5f6a676 +2303898 0000000001124111444b85daae68960df16878614244c7df777028efc13590cc +2304298 0000000000ec512eb3d57ce08f904c6de85d1bff4e0d572f26b02df6ed2f75af +2304698 0000000002b8ba319d79e5aeedac5858c99207ae14e391d1920cc2e949fd8028 +2305098 0000000001a7df726d5be9baea1fb8654c6f10aeccfe7b17aaba5af39d3ffe08 +2305498 000000000152b5fd63181a6662ba8e85f038fdc5916c36f68ece6a6351f6b59a +2305898 0000000000247f83173e7763274ced029470d57eadf4ef51116faa9a249d7999 +2306298 0000000000a6356488cf68a8f947aa6a23755d6e927625b79f26dde1734fdc30 +2306698 00000000013063329796c0408d424bbbf87d88784358e2ae36a09cb56ad53601 +2307098 0000000000f47ba701cd81044bce2ba16187284f340f93c39982d27f71297b6b +2307498 0000000000a72c661b91cf4cd3d127c6445f8da3aa1e9ff01b1d7e6bdc439130 +2307898 00000000001574a84ba3bd9be481ed5c84092bb62a7dfe5e8ce3369325f442cc +2308298 00000000005d829a743a7e887acffef6afacc40f29fe3ff8d533d83632bb992d +2308698 00000000013c151584751ba0dd39c647c1f7a84ea0c73b547f05d3455d50e68d +2309098 00000000000f83954948d745b67d76fbad2b7b1f45e0577e8159171f953c1df5 +2309498 00000000014087c83f96bf425afdb59c6d55cab4be91d863e72ec27ba0fb8203 +2309898 0000000001222834a2ff4b1d930c24b4188468be9afff99df1f2764d654bdb00 +2310298 000000000055ec48eb2d4a97eac76c4f244285994daa3ba7f544c6ac7bc5b8c4 +2310698 000000000169069103cbc6dd22e48c3d6c166fa2a3f53cf1ab9d2458508ab486 +2311098 0000000001af2ca95be39e4746d71964a657a991b1e35a21fcf7b71487935296 +2311498 0000000001545c40c27a125b73e3d34a901c6bc9c3e0cf07ea4df3cf54c92efd +2311898 
00000000005e32e382cb5dc5e0d348e82321dbf7357929508d5d32c90440808c +2312298 00000000016d8c2c891a4c6fa26e0fdbdf2f77058675d3a5b1efb7bf411c9b0e +2312698 000000000085b9ff694f7d13269861a2dd975344d1bd71976a315ef70335a1e5 +2313098 00000000001d28127bcd2715ca3a0dd02c8118d3b4f474baae78054c72ff80bd +2313498 00000000019b8c237101b15d4274edec586119190a9d090a6bffeeb10f5143d3 +2313898 000000000104504614e77a33609942d1f847172c15f312c7bd492ad01c86272d +2314298 0000000000916ee2a8e50d4a2a4c059a1bf5721a399dfe735eb7284cca5622f9 +2314698 0000000000ab56107e185012af88dc242b9a7b4cf31df67f87e8e79924830c3b +2315098 0000000001bc4c04769639cc248d55696323aad122f89b1edd1cee1b1c2473fd +2315498 0000000000cb59e64f0794feb4abb5d24401abf1dc1829f345e2830547055ea0 +2315898 00000000015cf7dd94daaff9b862b110b5b56d0b13d6f3b254986d0fa7519b55 +2316298 0000000000653249d1653700e6aabe1b50f82afdcd1a95a0c165f9cde0962c6b +2316698 0000000000efcf36599361d9252291926179de9ee3346d802c506e808f9bb70c +2317098 000000000030eea323b6af247d94e19e39d6fd7a082e832be907e403cf6e46e2 +2317498 0000000000296b8958a02284b2d16678d8a77d7b5ce2c2a6125e0e6833796eda +2317898 00000000000b2aefaf4bb6149c8f79ff26d26628585bf958ef64cc31eeea6a72 +2318298 00000000011eb147f4fc11ba5bc267821d6634aee79de4b3eb6cccefec13338e +2318698 000000000065b3081c44ce0e02298b79d2d89df35f4c39f9a83614ebd68426e1 +2319098 000000000084e319f2659a2b02aaab6d87639a88bab4235b9211cfde19b0b26f +2319498 0000000000db047eb2129d4153d94689fc2369158b4ebe112ff93f50a441e739 +2319898 00000000011d08082023c66678d74afb7d5891d91c7d5e881d21dfb9cf06e05b +2320298 000000000100c6ce7572fbecf9297ad4c7af175e792a5d6be1d7c26e1304b12f +2320698 0000000000b21ec56046a2ff1d59eca3665b3c44bde3b8f49f4e5d084ef0299c +2321098 000000000023cf9e37285c3502340ae1befc518def5152a412da248636f53457 +2321498 0000000000164bb89ccf4a361f396dd67e5008c3e5434e52713a85fdb7e5d714 +2321898 00000000010983c6b200ae4d9d2e57a3348bd5855a72595b5dd9820639e3256d +2322298 000000000016b729a075516aec31821034c7eed81d42353be17384522b5d1c42 +2322698 0000000000c154204b465e204411481ce2b094a914174c5a759a05ca7453e6c3 +2323098 0000000000e90d27adc086d59a4e45916127e191bb179668e568bfc2ccef8599 +2323498 0000000001368dca968b18ca0da1d3149b247694e613aca805db0a99d5cc5f76 +2323898 0000000001aae4ec8c15d86da8da080a6396c1ad01a182281ac9b42ab69beb62 +2324298 0000000000b0dafae95559741a6606a183ed56479bf9c1d136251773cd8f7fcb +2324698 0000000000fccee1c36ab061d1af1a53622337074d1e535f43f6ee2e6c1c3155 +2325098 0000000000f6146d09314446a58643a48231c55784ff4a5de52126587c21e257 +2325498 0000000000e82279b647e992d170addd911d9d15f8d13f06e735f76012ba280e +2325898 000000000054ddfc737e769322ed63fd899606e6b70c005cb063ea8df8b06c41 +2326298 00000000010cfa664246c481667f8608ba5e6deb163949232b37fd4250965e5f +2326698 0000000000b21c5a7d6093c198d2fff4cb73bf1559bfbf612f10c3d86a3e6df0 +2327098 00000000005b4093d77933d41830f2626d5da974abe2bd92d6c9e7c7a37f4ba0 +2327498 0000000001603b72b86d84709aeacce0ffdad5cea2f4a71b61efeb48232236b5 +2327898 00000000002f871002b5a8862229a706bbe52cecccbb741078b02ba575dd903f +2328298 000000000076c214d1ac87fd46e5c604036a78bfe77f534b103689744ca409d4 +2328698 0000000000aac594008bf99e7d72f9465228e5514ace06478a64574318a35d24 +2329098 000000000006145e32f52c7b64fb0eab6bf2898e43ae4d03351a5fb629f784fb +2329498 00000000012818f16bdb2a5e29a44ce147ac7249dbd43e7590399e8c394ed92a +2329898 000000000146d8bae9be2b5ba2e1cec99f54e4523f38f62881e6047140df937a +2330298 0000000000d457e2afea2ffc2aceb54272b0071a3d5a79cc27a360102f4ec775 +2330698 0000000000a261675ef61bcb7fcb191ea6588ab8214bd365ce3742e1a59b49f1 +2331098 
0000000001f51c98c959bcf74020707de2269e556e71c58afb775c69080b8800 +2331498 00000000007a6699ea6954280c40e3fd9e57ce34b4f1a612ada64815c5bc890c +2331898 00000000009742c1537c5c5dd43e5c2d8118dd85697843bd90d10b9dacae5191 +2332298 0000000001fd2793444b84aa6231a29ddc1988297102e058fbbed327fa32e870 +2332698 000000000123d71151863f78260a4bc78fb0a676b969ee8fc5712b367445ef93 +2333098 000000000140e7c0487f277fa0b527114055c8ea452320a5e7b12d03a8cf355d +2333498 00000000016523873d9b5a78f298483a31a717479e57be2dc49b3db348c9858e +2333898 0000000000d8f871a3367dabb2dbe3310c9a04ec55e0f9c2d30a32091fca7dba +2334298 0000000000eede4b59db09365e29c11229b930598e92017c92f5256eb13fd0c7 +2334698 000000000043465ec9999afce2d1e31680ecf4daafdbc676bf4598616860ccc5 +2335098 00000000018e05feed9c892c80b3bdfdfd2e4a18625b6f9b006070ea8fa4dd58 +2335498 000000000107f668c4e68bf1314dc48134d3f1427610f56b4f31d925613b8272 +2335898 0000000000bd1afc508fbaf401d9222e3cfe6c95be2a4fdff10fd23a18160337 +2336298 00000000000d6105ad108ecebd0fedd9af51d70288cf3650c1ebce7be0f5cf01 +2336698 0000000001548acf17ae82571e4a48abce32eebd2dad60cfb9f19161bd790850 +2337098 0000000000b3fb690ec3d0e5bb4ec3b705ae33b6702e7743eac328ad8b9f101e +2337498 0000000000df70d125b515c25982ef643a329fb012ac1e6b32ab1f507b4e6b8f +2337898 0000000001d22657937d7746e7944b78345f5198a18692e88fde803bedf2bbd0 +2338298 0000000000fd18126bd76b8c94486977eff2db323b9326f3ff4eb3716341a442 +2338698 0000000001b76112e6712a0d330cd98558c35def776ec5c07dcc10af3307dcf5 +2339098 00000000000d9ee388b2f3a3e2001bb999d8d915475ec91abd57d730256f1f56 +2339498 000000000083a588f79b252c7d992d1dac55bcdd3acde6966fe6436cd404e71f +2339898 00000000012af3253fee2eeba7e12278896c64ef7a15578da2c18ceacc362796 +2340298 00000000011b05067875f77f1851afb965abfa651ad97c42f0ad02b19a2b81d3 +2340698 00000000002383aca2e9135cea8b77f39a49c8c386b35ca2499e2a0ed27b5051 +2341098 00000000006bb88b6277f006d20df984c440d93e9c2ddfea4b41eb691e78944d +2341498 00000000015155b50173bb40bc2170b8adef6920c0dc25459ed572bde1d60517 +2341898 0000000001198d04eeb0e5724572d4128c0917249e39dae3ac1b80d739a01677 +2342298 00000000011048909ea6adc462f947a9e810aeb478258edc1b087a7a67282907 +2342698 00000000014caecb262063018e69d3bba1c9d14d67224b493fa7bee41f2431b8 +2343098 00000000005c04ed4452cda9c3de74aa08aa4e28d5f1dc730b777770ef2f0564 +2343498 0000000000f07aebf4b0a3ba703cfb9cdd6c9674c099ab5ac7bdbd1867731883 +2343898 0000000000ea2ff2ab3a1d764143a6584fa358ba295ae1d92947345279622e33 +2344298 000000000043120baca2f52a6b0a16150d3079c1e6af1f1c01f906350040a9d6 +2344698 0000000000ca7e3a506a243d1842a831e7aee517769198ab1d442163189b0ca7 +2345098 00000000002ea4e096082d3b709674936025c42f9ea4e6e706f90e2222d85249 +2345498 00000000005c291738afb7b9963ef96eca29267a8c87303f1b70154d2accaf8e +2345898 0000000000238b9ccc6f1acb3c2b48fe7bb00c5c15c0a7a943ce91c38677cf44 +2346298 0000000001090343c2fde0f8ec215c1ac9fcfe71ca939ae2e33e0b97045057ea +2346698 00000000004fab3fbd450e0abe088dd3b1abd78eb442a17f304d31af8fe03c18 +2347098 00000000011dbf3e20736af606e4750089b59953035dd22389723ccee60bf295 +2347498 00000000019faff4f1a06bacc8df3c486aadeb8bd9676562c1f021a82bcaa3e2 +2347898 0000000000e78a3f9b956fbc07360fd41a1271c73e6c77e92de8cf7076241cb5 +2348298 00000000001debfe46d556e884510fffa67084ca3b30b3c2bad345764539d33f +2348698 0000000000c02ad2acfe6f2ef3fe5bbaa4c44d801d90f0dcc85cf83cc83c253f +2349098 0000000000e1e6b0af58cff90460261910a4bad93f33e481ac8f3bc057479a9c +2349498 00000000008fa1dfca8873a2cd06d86e93c05fe723bf6ce22ddd9b5dea43121a +2349898 00000000017fa9510f267db1d557314904974e25a470dcdba45af9c6b628b578 +2350298 
000000000048891a7c1cb6a8ec2292c65ba427ca8971ea14ede0a4b895e6166e +2350698 00000000019d30c08d20a523547c81225da08da4ef8f1efd4957caf1d486bb3c +2351098 000000000189dc2d23f55f4e5ede3ffc800b3e82e82be1582418f441fcecb6d8 +2351498 0000000000ea179af4aad8d7806ed51648625fb549651cf6609bccd4b9f01eea +2351898 000000000015e8f4967c668c8526f45b9ef0a160e2c4896974652ed29a2972a2 +2352298 0000000000816f6d3a1c58e6bb900896523e3a45835b9ae44f61c5ce92b1d8a8 +2352698 00000000014ac0218833f422a5391d2e7a61a8e7c08495130934a41d8f85f500 +2353098 00000000010c6c04a5463e390dde4ec04f1ddb192ffbe895e77d312507cd304a +2353498 0000000000dfd79f51bad15bc183de6dfb65fc90204eeaa8216456d3ddb86d50 +2353898 00000000009947319885440c52a1ed0da40c8d93bc951bb8b6d408c9eb884e15 +2354298 00000000012a21052e5fc5c8a1a77c0f30f8848d0eae522dd280e37252911d99 +2354698 0000000000560f84d9f2ff9d81fb6a6e67fbe266e09328f245cc7f0f9f7772bf +2355098 00000000012da67ef5a12e507e6230105b6e1be93c77dd250c3ffd16b4d448df +2355498 0000000000d50c7fd6fac446ff226a492a7430f8f18fc473651cf774e2272c56 +2355898 0000000001149d6d0b30bc2a0d37ca0d51131478a3e89bc113f50d892c6846bd +2356298 00000000005c5cb4c389100efbfc5e84fa1358eb464b67a1b952cf30e698be5e +2356698 000000000005d4e2f49824d5ea8247a593c095d2465191ad6d1999c9081144f9 +2357098 0000000000c9b8a20a94d9fdabf5142d126594602be85b3cc3bd57712a70018e +2357498 000000000125e2ec66b19a4857c2417fda5e5bd170d805a1f7fcbfef87482287 +2357898 000000000073febaad03e0d7291b5cde1f3dcfc2b6705ab748f2d66a9aef0916 +2358298 0000000000fa3b522b225f93512486b3001125fdd14211967cec40bd40de97ad +2358698 00000000006f88681fe52e10687c41c664e4cd9da877f0d2f12c51ad335d8edf +2359098 000000000163d6a1001e152fcc1e95630414edc0aa186e4c83ba7d7ab0a2fe17 +2359498 0000000000d5c06410e2f19752cd2ead951850300fc6f3243d28bdca20d5e164 +2359898 0000000000e9166944e6c7f1ac23ce60b150730a2a11814a9f47cd1f8059a3e7 +2360298 0000000000f79c997aa792e44761717252abb5fdcbb81f006d0a4f613f0386b0 +2360698 0000000000fbe1589e26c7224d403b26a4172fdc23c1da28bc498d5a5db89f04 +2361098 0000000000711603c0189bc86357c2cd1651313e5e27ab0b522bec3c2fc1255e +2361498 0000000000faaa8d3560900b5d344e6c54fda24e3acb9c03ea35daedb1a73336 +2361898 0000000000ea2182f9ac65f68467c4f6f02a920fe4f4d0592cc79831d313bcd6 +2362298 00000000014bdc0625f09db23561e7693896bb966d02b67c39ad733440c1a450 +2362698 00000000011a3ba79608acb53d15ddca0f5f68b91e192ef53b0e0ac6dab59f99 +2363098 0000000000f3bc47062a7ec2e910a9da47c1c12960e39a1a0603c1805eef6c68 +2363498 0000000000bcadf66751e448a1baa148e0c0abc9c7c0d9368ceac26bb40dca2d +2363898 0000000001c902775fe71263704ef28319f433fbc41f4d558b12fbf0913b69af +2364298 00000000012c2ca5981d9ed0af4ae713c4f6d8d5a0ba194a85388b756bdd9c24 +2364698 00000000003a762b8bbe2a7e6e1033549048e8b3e8f6b961e47f973ea051786e +2365098 00000000005b11792130ee1ce11159ebf11ac9a2bddf553f65fbbeb75d11aa8c +2365498 0000000000d6e491064644ea7bb56958519f58fa805ba8b35bbb4187fdfad584 +2365898 0000000001498d9201569b0188d456cc10f41414f24912f182bbd72360af09ea +2366298 00000000015f0e6d91b0d9a83772b922e6df27af1fc379063142ec56dbb77e38 +2366698 0000000000be6eb20319737f8617776dbe8481c574ce0e8d02dd2f1f2ee63d48 +2367098 00000000007d2aa75bc386d2938851e4f308efc02bf29b55432198f73332c0ea +2367498 0000000001da60586c72dde6f1453538a99df0229d25462dcb5bfd5460efd54b +2367898 0000000000ca57c1ca80125655a904060c23bca4c1b7b3460e0cfb29e6a01e65 +2368298 00000000015dfe386610f4edce4116c0cc10556b712423c3cc1ab784d15aadf3 +2368698 00000000006a115fd44680f1d7ef7b49eb29d4e8c7ba1e2292fd548cbba1efa3 +2369098 000000000171d07ceeedce8d9c3bd9545d02f73baf5081456d6bea05dad5e345 +2369498 
0000000001a3a2511f4b91a475a31d4c69252b84a19ca3e170f7d928b8cbc812 +2369898 00000000006c08b9f06f11de5d78e96ebaa833ef7f1446d8beaaa069684b1dd6 +2370298 0000000000775aa180f0e14a0681381fe4a206c577556e80a79fab25cd0b36cf +2370698 000000000163e45981e5ca2096ccd7ba41521ec83cedbfd0180e5c31afcb0033 +2371098 000000000086796c0bcb3272bbb50ea545a7e4cc73dc2331319b0236ea63c8c8 +2371498 000000000109e9e00fb821837819c71bdce55fdc031233a1518b29e93da40fb8 +2371898 0000000000f84b55bf292bcc346da6f1d78fdd70d3890fdb9e0f8d5e6f15db87 +2372298 0000000001c2eb64880350e5ecdb988424b64e6ae9304a41e2b44ffe090dfbda +2372698 0000000000aa75c322bd661bb15413be5ba161ffced8f7b620eec96060742dc7 +2373098 0000000001528a8c45c245f4f0ef3c52ae6885d7f282395bb579bf77df881f54 +2373498 0000000000b7c2072ee99fa347630d17161b58fc81ae185333f05f7725ecd8cb +2373898 0000000001dc34fd14e390d898207b3b6413c073e6203dfee426e9ad56a2c177 +2374298 00000000007c8f9ad4c9c45fe40e892b2948aa2ec4092ff15f1aa3a7810f7e03 +2374698 000000000140f945018ad7f681f30370a7b809f22cfdb025db5f51ee0c41bea4 +2375098 00000000014ce3f75ce490a4f1ae08e7144ae90bcd62d9f25bd274ab03b79e28 +2375498 0000000001aac2a8d9c79758e673740359d07f5a95f4089948073396cd32f53f +2375898 000000000154d7ee1962216cb8e2eca3a98b117de98f35096b2f81f39bd94256 +2376298 0000000000a8d8e593a654609fdd658848c1929c5d87ec65e41e7e2137b1c21a +2376698 000000000030f6b8079c2e365c39f675533770152fa10d2f4a5630a231cb0b2c +2377098 00000000006f9aad19f4b2db3a10651d6d8229e6e4915756af40f0ba9d0cc2a6 +2377498 000000000163a61ad8021eee973eff838b1aec8eff0c2d68ab67edf6187374fa +2377898 0000000000d494672cadcd836de79374ba0ccc789aa42238156788951c51350b +2378298 0000000000726c98efa1106ea1b27736455cd3da2f8c9a4f7fa73feabe571dfe +2378698 00000000008dcc027579b47b662dc5512b0d9c19505117df086ce9b8fad91665 +2379098 0000000000d7d03cf499bd3d530190ad76696c3fa8436249861ec88d052faf32 +2379498 000000000070d6bbcc36e07e208d2ed2b79a3bde497656c9a18f851ae018dff6 +2379898 00000000001b8ba2095abe84937788eb1c48126143729d20ec34d1724cbafe6b +2380298 000000000086089323cc143edbd90af1fc486281b3ee824af98f3fd91e17f633 +2380698 000000000079d992d1994e187be142229eaae15d66ba74d3009142f899f2169a +2381098 00000000001976af4dc5bf134ec0913155f6c352c63aa092f529648d4d311e97 +2381498 0000000000d31f5b20ae74fbd38ab281071f109ddd5b0da9f4f6901caff2b13e +2381898 000000000071c857356d237aff31bf422f8ccecd367c28c11f40ec144dc6f3c9 +2382298 000000000098e3d93abc94c0cebde285d62a9b8947e59f8f75222769b191ff49 +2382698 0000000000d8de97184422d5a0e5eb9126245cd244a3abb65e4ae53f40374b7a +2383098 000000000160de753ace2b62614411d8b4f9fee4c0aa2e01761a30443d42844a +2383498 0000000000c4841d4a2be245946caab5d35f9788a36d1afd90c175a08a9ac872 +2383898 0000000000951d88bac60111f86f08c8cac4d00d73e8aa099e51a6644d930162 +2384298 000000000206fab0fabba12dae4dc0962b8bbaae6ed850b88f36b341b6e3e620 +2384698 00000000006d89fbcf00843ce373d2674ea2844b2fde2d71cfb69eba45e65384 +2385098 0000000000b0c95819d8214c579be7da9afbada451e71df3d0b554422ce36510 +2385498 000000000105bfd78e5921ec1624e4c5716ea24e008e86db4d4cd83f719ac93b +2385898 000000000095f73ba5b4c484ae45312847e726c257e81e9075e5633b96de345b +2386298 000000000181019bf43171251f009b5ccfd3385d3fdad06cd66e76f6c57ce8d9 +2386698 0000000000a510ccc6078735417fb27719fa03ec0b457709bf9c59111be37471 +2387098 00000000002bddfbf5811d2c38ce754c9f3068f675b87ee0e0416239ca26fddb +2387498 00000000009e9793f70693a474f7b4316df1d87e267c7e572e6449259349d439 +2387898 00000000013fb48fe9ceef6b9b1b2fae7209f5b9102b25e4dafcc8a05caa2014 +2388298 00000000001f7ae005f9512d7135355a388430fb6f454fce4d7fd54a9a0446fa +2388698 
000000000022eb610bb23f0e77e28e6ca52e4b714447d0db8de9ceba91451354 +2389098 00000000000a14ab274ccaec809ea4ad59acb63d50a944e10e88bf72f3d51b77 +2389498 0000000000148d2a84b3c0753ecce1e815053ee53208219621cd259c38dd5444 +2389898 000000000100af6868a78fd8ea208d131aac12119a049058af62403be589f50e +2390298 000000000067cc97d333746420b64b9d7406e0d573fd411141ca5677143056aa +2390698 00000000011385e955d697d3b71b649fd199f9f59334a5171053c6820a825004 +2391098 000000000162ea0fa857482d9162559c7e37fb5521bc1f64b6912f45bb241991 +2391498 0000000000890c92e936e0b1e1dd78576cdec90a6c79c79f0c7a9ddb358813f1 +2391898 00000000014f822e3da5dd9405c3abf87d4b880bb8e69a3232329d7edb699a48 +2392298 00000000010faa6937e27f9b967fc1365261362ac2f393e31a1d987b236e6aef +2392698 00000000001818efa26b75576938671a501a43f701e7d2f24797d40ec6699a05 +2393098 0000000000b3615538fb0883c67bcb3ca51350a8d884f1cc4b1b1e62bbebc8f0 +2393498 0000000001b74e116c8d7e1521df1f79df4ed755c8668f2d269857b5abfef5e8 +2393898 000000000053d6499014a8fb7d5190473d4a290e48504fc2afe5a637f5f8a7f0 +2394298 0000000000341c36c29398e64ac4cbf637a7e76ba17d5b8ed04c1a08cc0ed36e +2394698 00000000010496eb92e5cde34273b0076c19a07aa199fde5dce7bc606c57a827 +2395098 00000000004126b4b42fe9d62fb92986a6072fe7d6c6767310dd0d34ecedb741 +2395498 00000000002b77a045724bfb1626e0f39452a898a22387f5d7394e526c184521 +2395898 0000000000725047fa9b3c64bdf9bc9d3df92d3a487ca3aa06731309d6a11493 +2396298 0000000000228505b35c47d8378d20c06cbb2b780419aa0730f87fb183de9ef8 +2396698 0000000000d75755e4e02cf7e58bba39b3189ebbbca772050a1eb07969a5e864 +2397098 000000000173a309a5d8414e68a5a945fdf8d12046257a8a3e2ed620b2ccbf37 +2397498 0000000000c8caf64b3cafd5bde4c9e9e1cf5d2fec5c92978b27b974ce23b9e3 +2397898 00000000016b7158c36803280726dd89e0d835673d184d5332ef797440b3753e +2398298 00000000007925d8b3fb3895d0ffc62fe4bf4563ebf579f760d675a706967e38 +2398698 00000000003c996f88f81ca2dcbe52449616595aa76f765fdab266bebd00ccea +2399098 00000000007f2cc5eaaeae9668646e322c7eefb0f6ad525c729a2dc0e1aed9ce +2399498 00000000016eea225cebee9a6166fb9b221600a5f4982864faa7f3eb2d266dd2 +2399898 00000000002cf3942d8a25a5e9b537856b5d7a7d8262d15cf0bd0b18aa901834 +2400298 0000000000289b1a567f45ab697b9f515bb9c255fbe03958314dd0af7263844d +2400698 00000000000f8b1297acfbd2e9e996c8aba34b4d7ed361c69693c4fe5391507a +2401098 000000000183a4ae043238529e2ad3a9fe344684bae4c9203eb38069f228d44b +2401498 00000000003b9621b719b109cde1560fdf0b5749f5b59d6bc19c906531beb2c3 +2401898 00000000004557c2ae494043740392024f96ff6c137f20d2ce801f55a1e0a58e +2402298 000000000034c9738f0e1f20360dd60eb76f00d6763c54866a6a51dc09fa8c29 +2402698 0000000001e57c0bc50e96e7d0a0a3c9b8630464bcaff317f72aaf403e46dffb +2403098 0000000000068f7b56378180c287b2f62e176aafd0e7b57852b73a7f09b4871d +2403498 000000000160a370849549fd7341ee6ece805ab4b7ac30b1592b54e2697ee11a +2403898 00000000005842348bda53daaab5ba063794910081c35b5c00708b1a1a09e2a3 +2404298 00000000016b1701b0215da2e75704b611cadcdbf168c5501f8c0d51eb1280b0 +2404698 0000000000e21a12bdddcb2c23fec71be74b5219a7339bbd9262d8fc2b5aba5a +2405098 00000000003526b8e4c4744a28bae1836e63b33fde7475a99a7838fab9012260 +2405498 0000000000df2476046737e6a245cb631b629f88c3166c77db7ddd78058457c8 +2405898 00000000007a3020bb6a20e071be56dba938841537ce101ab6ee1f54ca659ecf +2406298 0000000000361c509866748f9770d92059e23b2dcc06e157a897f1168ababfca +2406698 00000000002a54f311ed4fcc49499f3a7434391dc553fcbcfaba073fbb6aeab4 +2407098 000000000100350c1744bea796b8889974183fe3478da1f5fb4c6e488493ad98 +2407498 0000000000a300d89281eeaa5e9e6a273445b43742a36bef88102354e498c47f +2407898 
00000000009e761db7f5135636b3b52d1001c4a48c1357ec277c5ca7978a7d9f +2408298 00000000013a58729b6d2f14c6ba0b2ccb71cb6709dc53405c4fff9e5b83cf83 +2408698 0000000001e2c8af8b9ca6b2ca7121e906c43ad600cc8c114bfa2be938b8a704 +2409098 0000000000f881f81c8165a833e73211c7bc62d017c7a2bf148303b2d8ddcd5e +2409498 00000000000c2894048e8fbbf1108e6a8f84593bf0335223c3c2128543d479a5 +2409898 0000000001c5aff172cf4e599395d534772238835404afa53808643fa61dc2fa +2410298 00000000014bcaf8d811cab113e55e64323a260e44112327f14c316e4af04b55 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index b46e6ca51a4..bfa58c7602c 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -6365,3 +6365,456 @@ 2545600 000922846ef68b656beee14815e999df2dd44625fdaa9fca703354db588d1b5a 2546000 0008416c201ea4c6694b57a7571c4a38b4bccbe07a445d36d301d2d4d286a361 2546400 000d9091cc81528757991f79e116b4de846d76d4a04028358ba19a65e5acc480 +2546800 00061a911b6b016eecafbc8fa1f420c0cab1da73c631f550171e9a12c9cba32a +2547200 002225d86cbe8ed6f3eedffa4860bb5ccc5b8142910b4a63a0b1cc77aef96d09 +2547600 00002847b3ffbef7eb0b8e0706f0c097e7a2cd0f1c8ef0de9c2be70d49e9bf30 +2548000 000d2c7b78a515506fc8b7723cced15608be95b06b37e5b419095e02b69b8260 +2548400 0004881fa985b4fc09363bf09e48ac107dcc0f54dcf9b341c682e10c5d7be7a8 +2548800 0016e21a38f4b1e347e182960e8f39ea27ff448be8ceb11593cd8bce6cca9e1f +2549200 001aede5a8a7eb4357ad4209cc0f1bf4b52eced3bf1133318c7d5b4865ccb24e +2549600 00065704bc35d9ae4996a8d4e0ed149c6551982d100b0468c15b8446af3794e7 +2550000 00273e6013f4ef5698046b122afece7a1d1f994ca89b7de4d98dae507fec78fe +2550400 000e5948bb30944cf68e4a41ae7abd75ca30a88eb1bca0ac77d2d35e34da56ee +2550800 001aa13ddb1e513881f4b5a9f3d13a68ae6caff3c96ed83facd3e1aa95107e85 +2551200 0009b24ec4f62b40b3478a1eef87a400c41805a91ef9e7aac859647d8cf08a38 +2551600 000e46c59a0ab3b1b7f53b7cff708b1dbcb0428e43a4d0ff5efc9b39b5aaed23 +2552000 000a7f3c07bd1329c04304999f7cc14efd4c1be61aa588b88eeb731c04d27bae +2552400 000ee67f5831ee088add9c5d6addbf6804c679cb5d34eacd35d43bf491639e47 +2552800 0001e91f0d8d81b7574f6341edb8f3fcb2167ec4bb27082e82758758a4c9b68d +2553200 001349e2d2fdc953ee0666c1a0538c1bc8de782a65d3e1325e6e5ed6d4d9dcb6 +2553600 00125afb93b6c767977c18799732fb81bb8365b1f379846c095287637f4f367e +2554000 003189afbd739dc0e0284e20bacfc5667058c99e88bcf602267e17a41e801735 +2554400 0052fa2ff2ac92535ef8e197a9d9b51b38ef3281b13510c43c4f7e5f68552fc8 +2554800 00135f8d56b4289109633c564033d07bc84cc08e76f973abe1d3ec0911b21d51 +2555200 001b6f3da70955330b0deabc0488d6e36bd02b98565c78157051cf7546d05c78 +2555600 000050d85cd66c77babbb42f846247613ebc2efe3e9598084116abb12da1bbc8 +2556000 001a95ae3db0928f07d3fd169a168e931a034959a9f22c0d9f71708728916523 +2556400 00034e87f0ba759859f2ff759cf966a2cd79c06f0dc956558bb01f3b8ceee4e8 +2556800 005fa0dda557f0739bbbbad9c23d1bd63bf4a09859825dd1552868716616eca8 +2557200 00195f234a65463c38aab641bf0f1bc8e48f1c005dc4da5f84fb097f2d6235f6 +2557600 0008f53801d55779f05035aa7fd985adce5b8475f3a7c5318e08d79c792dc3d1 +2558000 000e2cffc1ce7d7fff3d75a7d0db8455459735295619318c1c5d1c4732224d83 +2558400 002536e9a42fe58f6d5796f737cfae208708cb4680b51172cd0cf030712be1e2 +2558800 00069a01cf05dc9cb32bad0372e85022445d211e1318fd9297a12bbd76c90fe8 +2559200 004278796aafe42e707f51fa65600f95afece0a44618bdbbc0e820e50f9a0640 +2559600 001eac240f34a27e5939d96365ab03ba5a3573cc854fe73c0f0d3270e7829ea5 +2560000 001a1df8cd5e636cda82900c4030432017f7aeda9ca60f6c706164732e70fae7 +2560400 
0005d813fcf729b396b3f1e3231fe9897179af8af37ccb7b0cd25517e730a219 +2560800 002cd91a3722bb6c2d482f4721296b523d46ee9b42850ba2d485856b6800893d +2561200 0030d8221c13bc3cca7ea697e7f2a499a26b5b4cb4f82c8a65c33ee6f486c19e +2561600 001e49ec8e30f0f70bdf6dbaad69aee534f118f40d9e51e33512f226bd8878f2 +2562000 000490420bd620ea7779dd2c0df4172807dca7e28c21b76895ce4f717bdf65ba +2562400 00202f0e56ab8f000ba73ee6bc64f23919e88c3ca10ea93c1b3fc5763e943bc7 +2562800 0024ba2b1ffd69bff3830c2e358b1f831b03685787b0cf3e6d37a4cdfee415f2 +2563200 00bfcf5dc7d8196030f08aafaa36da08324a74d75563a2d7fe01744da8a4b4ec +2563600 0028cd514ce88d81f1a811fa67fae2e5646ff60f5b2e1308a8d19c4456bf5bf4 +2564000 0023f4a6154e360716c83e8e9be5ca04af18dfab8fe5a5b9da62e86245546ba8 +2564400 00022e96702c3e67614bd6e01aa92163e5b7b6ddb4b1bb6425c849715e91004c +2564800 004bcd61176fa6a939176bca4a81804ef844103b21e412a033906b3e02b6d803 +2565200 000f43c74538dbb93cae433d36084d8415b7d2f79144bb60a5fbf3e1507604e2 +2565600 00029738a8dd2af55b59cd4af0c7d27f1c3b020c4d7f694f835e15f5d9ee7c96 +2566000 000e3657d76e8df0985c96ffb9b200f03d37734a758c04a8818637b5bc3a9d84 +2566400 002d055d5710cc823ac81751a31e1041b8f02fd9d47c7b82fd1152fd2f223367 +2566800 0021a704b7a728203d1869b65f9e7f4c04af03c4e935d3bf7662cb6451a4fbb1 +2567200 001112d28564d144c3a330be7a655ea95f615423d80e2c0584ba9a2a65919e72 +2567600 00439fac4e90ad99d1712d06d14e009a17f3cac9739f1439d26d02ed1ea1e22b +2568000 00039fadf185b14b3b9a7e0a7a50f615f01cbdc98890934a689e1f768b21c2ac +2568400 000cdd63ca98f0406da85d00883f6f37bbdc72f6bdc6d38810a9b5ad039168ea +2568800 000ed1537a118f37cabf015d47497ae53d55bcc8660b590f82ceb2b487c2198e +2569200 0009df6416538d5a30d1f70c9e4bccb6693b2ab6f5b4f38bc1c987098c9a744f +2569600 000be77f174535d1957c3333aff64d04b7a749b55e536a65328d4c400778b4ac +2570000 002f16febc8fe7cf17ed80593d364d639f5702bf780a91a81642bc195693667c +2570400 0017488cc6f5cd9febb647eaf80b908bbe8615bc9242b7726c6b6afefbc85316 +2570800 0022891d6c749ea542e2b5bc0367c84fb24caa49434748d5fdf358aea42fb606 +2571200 000b33f17e8e6638239bc76780127cea919e56f9d732bf65d6ce07bcfdaa344a +2571600 004c39f92647c3783b2bc9b71c9c73a9e1644ef514ec12d108cd6d20cfbe03e6 +2572000 000a3b341a6fb5a84ce1d7870510a0d908539863345d085a0030ba66401a7e96 +2572400 00312f39480ae958888d43e7c10e9f4f7ffd6965b87550b8ca56807a3d005478 +2572800 0005c6de91a15985a8ccb67480555e8e08bdeb89328a49438c4bcd5b2b298745 +2573200 00191265a052fb442ad76f40416875d061327f2bcdcbaa2f54a2c9626737828c +2573600 001ea8784da364305ddd1833179b823b1f0ee9003b63c536e15d4f3a6cdd6976 +2574000 0017e1a4555d508a688c134b33cdb9147ee221445a4d635e70bc9d1a74102b05 +2574400 0037f92886983911ac65131fd65d3a206b7734005a89ed896fa9b38bd3278f4f +2574800 003b483a1b6c3797efc1b4f0c4bff9beba38c6625176de39dc1faf0ae3caa384 +2575200 003b503f6164ed3d5cabf1f9ca08b69a6821e8ed429231db8ca9fcb4cce4460a +2575600 001de6ae25087b898d963a459cbb735242e052043475aee54d5fc0ca890fed3d +2576000 000f0387c063b5fc87472db6b71793c08faca808e9967d1faa03d48e13fe68e2 +2576400 0033b9228a0ed3672e8b77b3c3bdc005fe55fff2c9d871ce33c0f94349beb0de +2576800 00465d49441a41e519fbf9e09133bf46f6de485d2cc04588fa4c69f11fdf0e0f +2577200 001334c5b97893507f222202ab2b2c939e9fe58bbe0d5fa57bbfcff67c428493 +2577600 001f4d88da9320653530666a8c3eb7d6b6a2d55cce797592546e0887486d7c0a +2578000 000906a3241e53cb7e4e46815f5d204456f25d2480c640c05ca932792a20bb58 +2578400 0010a40544f2bd825d4e7c0bfec330e19e318930ce5277f34d27dab61a1e8d36 +2578800 0029007c4db008d726ecb00d9b736fbcb19e3fd013fda41b651cc6baa170e817 +2579200 000d3075bfe205c37ee3ca5cc9263eac72567977e60c4f41142e74a83088f789 +2579600 
0010e19b074242c805455eddd29c0c7a4b5d302c939213a951fa06fc4e93dfa7 +2580000 000d717569dcdac9458704cec46eb7fc67ca105a20f6dbbf9b21f0b2a0a87910 +2580400 001eb1b827e96863ffb0b77f062ace47439b1d4294379f998525a316ec836d6a +2580800 002c637adc01af9d1e5bad80cb867845fd83977f6ae81f004850c3443a8b73da +2581200 00196120803818124a8fac78ba371f43331f796c60c7075f31e6bcfd0076c761 +2581600 003e015bb30b0e4fbf1bf10140703990e6e915fafe68869f3a474334dcd832ba +2582000 003abbcfc96f1f198cdf643ad7ec87a0d0fdaa72cd3f6089a1b0193512cba9c5 +2582400 00180eacdb358d2d655bc310f46124c4f3b4d30f19fb2fa31b9ea9007d34fb8a +2582800 000a8c0ffea7cbf0fe4c0bc4ac5d05daf9ce6dfdb69c5835e6a2a94bf6c085e6 +2583200 00334b987ef209cbfc7c119edbc949db0aa5f33a0e9b82f4a6514593eef85e14 +2583600 00204748cd2f3e5114e1e72e9e1c34b9b255a40676efda6b65b1cbffab9a2751 +2584000 000099bfa6640fe7e4e3e80f00f837fd3a9c18e46de6c7b424415e60e543dc18 +2584400 0019f3de261dd1159785e66e4ae6064f4fc84e3a5d732058641c671c6449309b +2584800 0030ed515220236633e68ddc881f8ef78208e8e83000c1e503376bdf6e88c255 +2585200 000c8be193c6bb414f656d7eef889ba21659b60e012b478d78742bc5df3ac697 +2585600 00035628329212e32bc16c4fa2a23b9ba7848c2b78cad81c5c00fbc69dbf67a9 +2586000 00098b106c2657e848413683d5a4052806c5c5ff9f15b6b55040cf3622431d54 +2586400 0002137522f0df162f1a924ddb786453d7c4aef0f69003979dc79634a3ac6d99 +2586800 00172c8381d8ceaaed83fd480fadc0e5b7ea7547827db5925465dd85332b0cb3 +2587200 000b8069a18a8c616a892eef778f792a47039bab54d6871e3955c326b8e5163a +2587600 002f9141dbd03e50cff2624ffd2f670d1bc1482b89ea48a0fe31074f23cf6b78 +2588000 00174cc0ea5e1d8d278933cbd3c8bbf5f1b94dda765fdaddbe0d51a49aa7040c +2588400 0037e24b1da2048f3606af275ac66458d052ac82ef408ed7e3e9ea42d00237fa +2588800 001027dbb204a514a32a790ffd871e03a61ef9b2bfcffcc9f7b33dc45204575a +2589200 000814d9fdb2ab69304e7c577fd0948fb93c61922cb2d71123664a97dbc23fd8 +2589600 00075b40446b6378495ac4c858a395ea4b341e99a192dbf3fc999203dccab36e +2590000 000e025af081ac1d5deac23db13a487a740c1dd11c8edbe101da87491c59987e +2590400 0024d0bb30b7f8a2a9c8d699480d303e0389eee3827b709d1ad0374ab6b57ab7 +2590800 001036c4bad14405ecb516fd7702342e627221558b76890108d49b364a62c2d2 +2591200 00124c377751a06275b51fcbe9226a614d8b40ba68608810b2b23279f8a896cb +2591600 00511575a33289ec41bdd9322b7910c75cf1858dfebceb7012be8a00d8c5e25e +2592000 002e88756241f925c2e583c1591949c1b37a76dddb2b3bba1d7fce049d6cac43 +2592400 0025dac86f46c07862c46b8a9f133eb6996b05213242efd0acfe2ccacc247b0d +2592800 000cc2becb78576b29116209bd8cdb218cff7e82b22f1d19802dcd43bf8d6b98 +2593200 0040d299bb3b5382f40b4a421265643349fa0453788e93e0a31db7f25c5a80d4 +2593600 004dafccdc8152cb85e8b24910396e9ea1df47c2b82b36481c7ca402c7f405a3 +2594000 005c1d143405266648f766e036d71bbc965134b7675a52c661acae52a68a671d +2594400 004e73df55157a562d6908f36022a4b39a39bad6abd423e45143b643714b87fd +2594800 0011297526d9a133adb068ed9df83c4ee66b4da3c1ff59590396917a94021f00 +2595200 001de1406dffa6755a8330043e93dd78245f283deb97f2c9f869e17274139ac6 +2595600 00139f3ce586a51e017eb6dfff7e6feb45847d9859a2ab89206e5268c8ade159 +2596000 001d6a711a1a7df35944b67bdf7f7523f49e48d3657535cda770d3d87670d47c +2596400 0032058829ad6ce5d26af35441ff85bcb0c80af39b41fe3c2f941b6dd4f700d9 +2596800 000aa5f78f90c8ea147d56584edd71c459608e872029dd2c20805b12c272a232 +2597200 0017c0d36cef228f71c3b3e0b1ef6d36219ab634904ad269b5caaa02503ef57c +2597600 001e0974f54783d6116d659c06cd652d53a13e29cf873220f9c5e6fdc6ac9ab5 +2598000 0020795a3b1781c937e0d0882af3334d828aea68ae1fb5d0f4e48d21604863f7 +2598400 0008708b94df86b73a4df0b854b1655c824c249ec8ea5533604ec67e6849657e +2598800 
0006ac1470574bd39c8d9ebd2c815d069c523b959d7adcdc58033399edaf6aee +2599200 003a78d1e2ecfe5410a8abf8433e9669f3dde8110f64e3dca5391a0f0c7facf4 +2599600 001cf30f48f1cb9c62ef1d73f8385c48ed77a1f4c9f5924d65e8c266cfcf5252 +2600000 000b9ecc8bd155dedcf64d27771fc79b8b753fef0d8b0a2557eea1c17b0d8fe6 +2600400 00002605f44a66137a91d8fef93d4e3eec0e0b9395793f9157c5a4d475fe84dd +2600800 000051eddffdae0a1b66273ca2af7f9dac62e3167e6c18e26b43be9a126c09d5 +2601200 0000455b4081b8b0acf29c3ea83ed2655573fbec2756f633983ca72c498244ff +2601600 001fd7aa0334ca299f10d642842fe594e112263395e46ac4ff25e899cce6fa8c +2602000 000b65fc715cbf19e259f88e079f195bbf410435522a4edc58d478f0bdf40eb3 +2602400 00005a6a9613774b931a3d917a8b5333d3be2e0219334686ebde3835e47c420d +2602800 000c8ca93453ad7b227dab39a8f4805f2946a02bd313a69b6b88973a7d23e504 +2603200 00077a7b86918c789f6d0d918ec3a0509987d99b61fe92bc487be20adab8fb10 +2603600 0027102dbdfe96462d5a2a05d23a8e61ab1841e2c042a3dc3f2602d3b43f942f +2604000 0012fd14f8bcbe8bcc90b9e94caaf2aa5f846b3c52ecad1c8747ea9f8493388c +2604400 0028bc5aa39fa03f9989e7b39408829c6294c2deb1f3a04f02430469df2fc846 +2604800 006c91f3053b8c371937d913a57a60cca07493e648a43683d5c4458b6cb92df0 +2605200 0009ef1eb52e3acd1f5b178d04423c459f71395a547ff8f31e8965667cf2d646 +2605600 001a638a38e8aef76f1d251de55b433321971ea18e0940201f988cc4a7f226e9 +2606000 00555da1c42794eedcc49866871a86ef23f1c2a9ab8af082748170d2a89e29e9 +2606400 0023181dfca2cc2344a74811fd1733a1552da07f71e6337163a6c173d6e03edd +2606800 00669a430ee16be5ab026389fec1ea9e787dcca22822646066aae3c12849465b +2607200 0025912f1afbedf044209f4d5368f0f8cfc7ac2ae2a36103113cf55460589a9d +2607600 0019e55469477117859b855e26041bf0804627fe9f61821df8ad8f33e2271ae9 +2608000 001111631dffc309c795ef5a16249f858cee35e46c8d74b0566add6d71ffea32 +2608400 00113e9d77b1f8af57f0f6d9fed408d6b9a96fc7dc340d3e4d4b9e7ceca743ae +2608800 00007a34e8e3fbdb32f4ec512456d0d3705549a9054746fc2f6e1d0dbc024f79 +2609200 000fcb99d36ae30f9666a70195e1ffbda74705035986f4949ba7295747b0e3ae +2609600 0006f37ba23902325d30d2ad809f15683e0fe14e740723a0712a67de0e2e862b +2610000 001ec0dab50faa76f18e6f909e47fab55b1c4ca8d3e71ecb2ade5b4cbd0e5fe7 +2610400 003c6d96aa1d919ea8f512e181093d13d762d2d8dd33da09938dadcd260e1e5b +2610800 00141e7a4b4b6648fabda568a2c5fbb2ece5e86243efdbb2cb6c21df681f69d2 +2611200 00160a4588bae7ef7a8fb6434e9ba89b81082ab18ecaa5476cf743eed5c826ba +2611600 000f7ccbf43acf6b3bf994154cde06ad662d14ebb88c7a40874a52c9d2b1d73f +2612000 00013bad8b72f5a971ae0955857ea53d64d670028ee73edc4b708cafc937a7ef +2612400 000392b5bf5b61b83139267452490f66c46e456e28f016ce65beb00b999cbf1e +2612800 0056a08e17a40b4fd706411c71af523f7523fd0800d47be7742b674027f414f7 +2613200 003b86674b0da598750b88ed4eea867a3df98139ad1b45c4161c4381f0279244 +2613600 00182e65033070ea0aa3c39139fc03787257848f2b0ba235e1bb6089d30c1124 +2614000 003ff6847afb29c3c2e6f6c0efc87f3b322676d5f321da489d325f4febc9d90a +2614400 00185af82c7bd90cf3af6fe6aeae937348776867791e2e11362737f4639e93ce +2614800 001ae8bba5e7330338af23fd447272be606058d7acffc8f7e5b4180b43402cc3 +2615200 000a5296eeca33f3444b5976ca2c8a0d073bf398adee39248d635d55ee0d9b68 +2615600 000721d297f8a02edb3aed58d9e71a483c523fc03a129a4e95fbcb0e64cae62d +2616000 0001b34090adc1a19be3ef6a6cd90876ad39acc55a66634f2c2c18255a1368e5 +2616400 00154d5280982e05fd58f43d233709bc7c226d57c0b76f9b2f603698fca69ed5 +2616800 000059727bd647da6e2ad163d9d715084b8d9c4986dacd7213c2268c626ab9f7 +2617200 000455ff6a49dc313e2a4e58b60539ced01ab2c5ae0d80a8ec3d31a472119a15 +2617600 001e33b33a92762aa653a90e3e596e32b9a73d50a9856e72e94815d4897e6335 +2618000 
001c6db62adb702e52b76e86e23b9b12f4af0a7f821e448cab977e077ab2311e +2618400 000ba9177eb0ad7c4e600047a2d151537e23fc4e10dce28e99748f7fc2ba4ab5 +2618800 0083378af0cfaec4ab860e94c0eb43ac3cdd22d269568fa4b0d95daf59ddc1a3 +2619200 007658e938a2ba766fb0cbb0bf5a9fc408c36ed762867073d4d41e78c7bdb2a8 +2619600 0011111464dccc6893133287125ca8fc612a0734157ef845a8f5a36e938fd3f5 +2620000 00139f229081782e5f0238fa02da7b0dd7516a0c6c8f7e3bc1940f841146f32b +2620400 00021eff77829d103433c08290df34d7f119744efc5e0f8a3080917ca062c794 +2620800 0015463a631c1cc2c2bf4963c78d685261805638f0086ba77479bf0e814bd036 +2621200 0020b50b5696bd12e97f13067eb772aedafecda7fa01b2b19bc243cf69bee223 +2621600 00a3af146bf8fdc7236df23c8232f8ada61bec043b3d011af143622e73b1ed42 +2622000 0028603097088eca9e801ef90fadbab686d38c21dc55603b9c97a4c0403a64f0 +2622400 003a75a211b2b5e127aa2ca7df8e5ff029fb2e6cc5b72370243315b8e59f8dae +2622800 0020f58f09553e6a5e55bac11eda3b8acb23b6d3d9d05512b643815a8b0988d5 +2623200 002a767de1abdc031e77cb0e21e76e73db09b0bbe0e2903d3101f4990b098f8e +2623600 001f76d6e7a436cb7f3ca214155435d1659d4c6bf0e8898e365cb44ef45a0f61 +2624000 001bf61a6f4793ab701de9b156be1776bcbd8aab1a61270f8c7f2ec3f4e945ce +2624400 00132e67d64b00ff49c871ddee6448515a78c1db4809a3db811960dd15e1bdad +2624800 000e9f281d37727e75b82b69bc3eeb0548cf9c07ee2e53aabbe45c0e261f0fbf +2625200 0014f02fa24b108be7bc24e83a8fafb3e650fbafa320b2ec34cd09148eb27963 +2625600 004ad15e2e18d5b8cd04865f51ec8b11a64edea0e37b101bedace43331c75010 +2626000 003d63d4279987a637bf29290ac0f4ba1c478b09aff9ff929ffa26ddda422f40 +2626400 0021900b43bd6a86803688771dd96b5a2f6d18592b2ef11d549442ba75cd5e6b +2626800 000796527fd698029df26afc7a26e33016b54899d2872e7c08e7a7d5b3f59c6e +2627200 0005b552e191536109fe74dcc5106a1a06866dd8f4d11fb7648e334fbdbb417b +2627600 003b87a8b6117c3f58bdbd6528dedab292f38488f0ce843b5a35a82fd8787725 +2628000 0045a17f17a03ae388415404ca1d6e3a79406748a2de28035caecbc05f50da47 +2628400 0013bb2f24539d282996e5ac15b46bca6f1dc119b08f276a71e213ccecfbaaa2 +2628800 000042b29fcc804687ef65e5c24858bc8c240166b753ab36335c7338ea4659af +2629200 000dda8dd038e86932fd12c1c542abe1079b2345505d6b1640aa6732e1c678cc +2629600 000b05eae4a1e1c60b9f7e17a43e14786c798d287d9602158d0f91ed1c8ace6a +2630000 000e0eaa2eb7113fda5a55aefd5ebfd1b73af4c148a15c342b92b23d31feacc7 +2630400 002271c93e342145a6f96f987fe46816473a5b8e8f1a9baaa52819d804f74817 +2630800 00123e986c01b3d36dd8d2e0d9ad10e440e330d158ab761f71607298fe0defd5 +2631200 002d29ac2e5a1916943fbfe14127efac74bdd852efe677773bbc0545a10eae1c +2631600 00047b4db7069da9c4312cc2a5aedace535c550cb764b83e5d8cba0863b8a27a +2632000 000ce6fd60b77b8af241eeba817d4a2748888927c20462b5941b9b4f9bda7ed9 +2632400 00083dd02700faa5fe2e899ac8e2ecf5536c47e7ce92c35c02c9287f770f55d7 +2632800 0031c0e3acf151c8d9ed7a73deac256b27b386b45214642f3ce45d60b3886b7a +2633200 0690fd6455513826157115d15269235166706c908950d1715dcdd38a16225814 +2633600 001b6f45dd913dacf99342ac955c54a3e7105c34a1ebdd8949042ee2aead3e92 +2634000 000d02230499db33ce16f286a8dd589dcb03f9813b56ea3dadb3673ec655bb0c +2634400 0036cf7a6b835f57173ad1a9d86ee5e8836e797db0ac6a41d028434f2cf8f8c3 +2634800 001758193411d899a5bd71319588ecec759ac499a4529e52a7034e9150d2b47d +2635200 0003089302b45b865221c91bb74f481170f4b0db3a40e397343065b6b0851ce6 +2635600 000306bf9787c7954d2a308aa035190ed7524079e9eeeba8b77c26ffaa2c5a51 +2636000 000fe115b72e96b3d7800baa85715a1cf0fe9f0f1af476df936651f6b54a639d +2636400 00090cf7e7b3e1efa06d38c6809db9f4bd81e948116fc5f53ff2a228c6efb139 +2636800 002e787b2e3a202420b43e8886412193e49faf62f8687ed0e652d2774ce155e0 +2637200 
000574a9c7374a9b9db30451dfac54ab141ea76363b3086a8d976394fae136fa +2637600 000687dd26b8642bbbdcf72b7a08a937799a3a920fe1dda921c6f5512fd17bf0 +2638000 0046ec120bae51614d5bed3638cdb043d0b3bb349c90caf5818b674f13e44bba +2638400 0008a31b18ffc23dca98b2bbfc08d396313f99c3d1fd406ab0a243d6e30bd952 +2638800 00260b8c303bb6b0d4a096e3c81372e533c2bb248e3634cc3d1015a6ed10bf2b +2639200 003521f5b24c96cc5d1bb9519ba22324d95b0521adc7f4c9ef76199b3afe14f4 +2639600 00276559b326a3fe51e693a5ef9d4f23de6e1ca63cf51ee1676f4a014488c726 +2640000 0027519fb60ed1e3096fecc6daabccd1b0a42f7bdba3e0faa815f204da7a9d01 +2640400 00269e5cc6dbb03a4e4471161283bd671e8ea251ac7dd92cc491d2f75c8abf0e +2640800 00095042b07b835b56af5591069f364dff68eab5dc86796f8fd098c36e1efe97 +2641200 00319acd55f16e951ca429563c8ee0ab40f7a9635ba73882ac8b15002eb355ed +2641600 0036f2298adf150f0583359de9710c058d9e8749b64eba3dbf7052f2365a6060 +2642000 001b6172daa6c1da664fa0a97b87e891409d4d85be579694c649c1dab13f6960 +2642400 002a642432d4e067c0bd35d9dc89ea8236b1d52e34e2c453c82f9180ba702b28 +2642800 003701ac115a36272eb08174ec36acfddd08651655e9499d4bff3593c3436463 +2643200 00169a889bf9284a6943a08c995dc16451ae59e19499b776caaa4aee217f4547 +2643600 002c55209ff831f24870391935b61c7bb5586c9b9371b733e43f0d0093628f6b +2644000 00232d01b5491efd8f6a06a12a3c54bdff3be29fb0c3ac637f3bb54cb40b665a +2644400 001e9532b3bb3e0a7cd87ec9a15dc96e5b62273f5f27859a8bfcad21c24c0d11 +2644800 0033b1d324e0332d91fe588a2ed24de1381731d1c681a61c50ed6303b213ccb3 +2645200 0021c16b1a366d706cc46e051a448dc889753a3c90e674350705eaf7aa4ba782 +2645600 0017f4a186ac724cc7d025b79e8d902d6d57a80ff453dedce51d0e7256c50215 +2646000 002ba09d028ac944804bd4ae3987a8bbb185830ead816e8f9a708799a9ddf1aa +2646400 0033c614d5de2e3fae5f7cbdf6899a9193a304d4f197fb761a3bd507073d7786 +2646800 0049fc0d8b717eb1807e8fb7edf40cfd815773e187fe93c5eff63fb5de4d86bf +2647200 005f9882a201a897ecf6c978e5bbda69b259c89143397e5ff40a9210db46b6fc +2647600 0091daf4985dbe8fe9dda2e8887821752e29e4385d91fc85712182f87b2fefe0 +2648000 000e4b603776c4242f7dc9bc1facadede2fefb800b59e2f2ff6f0eb531c0100c +2648400 000dd6a4c8b541a8d001ed1344200749c8cac30178273fbe26b37eaa249f2dea +2648800 0005b2d28a33fc4945c1b375102ed7549f1b2e6886d2dec270b453cc8ef19fda +2649200 0022a9dc5033c114fc2ddb398dec196f457aef166cf117bb238ce579536f8eac +2649600 00290fa622bc4494f388bea61cca1cd8c6525bd810485194ff882963da1322be +2650000 0020045d31ee016244ceb6615172978dfa05150317b23fc1b55202e262cd8b0d +2650400 000a86a5a28f5cfe373613e91f07b4626010dbbc3be71665aa8c5e2dc381c9e4 +2650800 0041db5712af5f86bd508e5d199f96ec414a723d9b1dfbed98710537845b7392 +2651200 000bb71daa4936593ed6bb2b182b74cf2fa28bd1f344dd82f92ecd529dcbec68 +2651600 00423fceb8fdb8d4cb0417901935d22ebef0e06e95ca760c2b0fe70523889a51 +2652000 00843f3668a8e13da739b67e7e1508e47022780e519e502e9fc79a24606e639c +2652400 00263690277f56402a4c4469a927bf752b7177864195df5c21fd66b071ebc84b +2652800 0005566ebb56f46e348f9dd1b0d92268e15f44bd12dff232313c0010907b523b +2653200 0008f017ea7135ad84a684e402a518eb7576aa8014af13ebf083b7b28b3ee413 +2653600 00155760f7e9a4c30a2726cf65b9c03ddb4ee4044e21a9e9bc55d193638d7856 +2654000 000e98773fb4ddad6ff0c5a4c6f0ada8015e47812c5aa885f0de98379ec9ff25 +2654400 00239fcf0a0c8edfea7a874b9e4e1bb24c9bcf90f8dd987c6f73b1149222c529 +2654800 00234344e37326a811d2ee5ac019660532454f5e891c2e33b586eb94b77b1b84 +2655200 000f364cd35266a89a98b9a0f1f8a60e685e0979536b2e50ca420285832ffbed +2655600 005754035651740d2b51f9e8db3e668bdadcda2187cdc357eaf3df2fdc9850db +2656000 00372f81708130e80f2d893b4eb525e4a562cc5e3decc1bfb8baa793984fce02 +2656400 
00272e95588622071ce24bb1ee97c134ef42232b293025b836aca6cf5bae88c8 +2656800 000b9c20b5092868bb4404a51c3233e42ad70d3f3ac34873d0d365c3b9bbfd6e +2657200 0021e536bb645c1b767b1578b25583374e862eda75c1b96fd0fda33935c7530b +2657600 001b0dc227b3d85a72d17c88decb76973a960c9abe14652e80226c23445b452e +2658000 0002b5780f99ed0a5d8dd928ac6a4ce4f700756b4bf9bc778cd968e36b09316b +2658400 0589b2eb8e9a2ae7ca28f87b282e0027d90ec69d832ffe555c600404b3e15d91 +2658800 002e671e3bf3583230794a21212fead66d7d574a97218978c0fa7030d523fec0 +2659200 003a1efb1274bbe02ef0dc0f2c05c8221371d66f70d65bc0f403867b27b06876 +2659600 0014d1331cfbb92181521cdd6c86b2ee7423d5be635a0c8b7da51d106f57da6b +2660000 003e45cb6c23b2b7bcd08945e7f11fdb0c20685371df8c31ef206d669fd29105 +2660400 000bf56071399e8f39d1fd9ece9a3c0dcb08b0597eb298fd1602c6b17dd8cc0d +2660800 00322a948375b7429c50404a3c9764efd445076ba881bf605873c51ff0992859 +2661200 009e86db89bfa90e3e12d192c4ece7d0041ec40f79271f82905721ad5f47e71f +2661600 000313bbc8554fe4815369751b7b93116438757e9930eeb68f6fad8165cf50cf +2662000 00206395da87b92f874d0d75d88c73fc780d96e7f836524a61a973dd75a31c67 +2662400 00295c2b266a17756323427b4cf69f54e4a087ab3c9f906bbacec16b77151a65 +2662800 002de2a2742daff537693435e4e9631e63888a04964af61fff0c59b1248769a1 +2663200 002d8348f2ce2ca1859114056c6bbfba56b909f87a322de57966954f153fb389 +2663600 0004deab6966c924f6cc8ff02e51112b79281dcb1b9cd89ac9e7864e25d5b306 +2664000 000382fbcc71c9f4ebc0bd185f4aa9d8fc40755bb802a4cab206d75c8dca3187 +2664400 0007e57b18113005ba0a8657f0c5d972c1103810d0f22afffe17d64a40ba3829 +2664800 001a237eb49ac0b77e46a6704023adc7954ad22d4564c55241cc9bc3279fc83a +2665200 0041542d49ec52cb8515b4831d5f164b95c3d2c4c4534bf71a34a93bc0d3634e +2665600 0085bc6c8253d99d06ef5b659e962c00d7a74c762f212a89095c0ddf3242e012 +2666000 00add30a3e00d43aa35b57a7827cf25a003320eceffbe1101aced2bd839d1bc2 +2666400 0016bf79cc47bd41cf5e590e6612c120f740637d160b7d3b6df63025160106b3 +2666800 0016c15c67fe9e6b8a372eb621e263e87a1412593a43f0fe973747387f333fe5 +2667200 006c113875bda803de451386ce4c3ecd67ca941b44a05a2b2f1a116060e808cf +2667600 000fd66f6446d05dccf7d28d056e6cff8a2eecaf1c3b2d1611a6335bac519641 +2668000 00017890ea1077664656a4b4cdded9c3704e2d6d75de24494202fa6cc414fdd9 +2668400 0000e11b8957e0690eca4346d57ee2f3d9bfcf583b918b4962d96be184bcb6c2 +2668800 0039c6fdb78c2e1cb175958a6c7657d438fffc583125a4214c9a14f019e4a44a +2669200 002e7bd046394c1106bced0507c8d6a249da0f11b37edf16ec2ecb2506d65b95 +2669600 002c55b8a5332efcdab9d7fd27f83eb43f3cb81dda9862a7f096b558b47ded8d +2670000 000496685a3572a7673f51292ecda4603e6bbe4a4f68d1de003b153f99810a72 +2670400 001db42bf140da647840c4b0b677de6895673b44a31d19bb8958e3c2a759162d +2670800 005370fba03ed46b54efb1201e3612a9164de47ee04d3039e13b820bbdf7dd46 +2671200 001ca37247f51624f86cb8c7d1bac3cd02e5cb5fafd8a90411b6aa262c46f2ce +2671600 0084600eca47209e232a72840ecd6fcd982d949188e7c0024dadf08fa7b936e0 +2672000 0017102343b3cd2b4b47a2771b7eb9a6e2cb6177555cfdc002a9cd0dbf983a6e +2672400 001eefc979d263f6d099b03b5efbcaec455d4ca4e6a088a024b9ffddfe2541d1 +2672800 004546da58d6cc22b5b135e1c34d5c61eb2f759d295dd73b94b5d8b9f319403f +2673200 00776c089df51976141b7d587e5c73f91bcdeeece676c31dd93ef86d491b662c +2673600 0021cd96506483f6379ff2ba4037ba1e2ffee945bb283892c34445601276ee77 +2674000 0012acd69facf1584ad23f1189681229e62e84fca06549dfbf5db36eb2ff380d +2674400 00164a564bd431e7ec7e601953c48e0aff0eff9497eedfe0efdf3ab9a0d05263 +2674800 0089c15b178e83e1c5183a8bb260310dc31102b2ca0ba77f6d296d5290d58486 +2675200 00368e0f5cddb422f05a718f93279f3fd41541352679d42c5aa7223034bb9413 +2675600 
003a5fdca74ef0b4431268bed42b0e8cd77f5d0fb98a43d4351fac3237b2b1ec +2676000 002152e900f992127a075bb8eaa549ab86ee1260848d59018fa4a331c530cd21 +2676400 0089be0d44be234299b36997d612d16431ce949f10499f4f606474fba24bd2d7 +2676800 0733e4bb94f7f3f1e1f8d15b246125acd70342b0025689b0b3802f89fcb8ff33 +2677200 003706291f23f99073b78acb52b1657936c2da536d9b33f76e05144a792efd89 +2677600 000a5b9e62504a7dd8a617381e280a194839625148fdd29a25f27295e63d4162 +2678000 059a2f14d55f617ae8f08304e71c5dbcc33594e86597e10539267bbae740a62c +2678400 0069b1056ca984c296523fa99ced5b0ba7c1163b95c0c016bd2eebbe5e89d8bf +2678800 005c584e8579d490d7261718ec4a15e7aee170d774d5c665aac5f7855ba540c6 +2679200 00340b5715d51b5034567d108608a955121a5261c8dcba838df689e8e24a20c7 +2679600 0025d5e8396cd11706a0ef2dc501685e9a7a96eb7639609689bcb0f9e417162d +2680000 001da0561aa86200d0c9132e8581936e592e548d579cac0d35a1a19eccf1256a +2680400 003612acdc279dccb81e7dcf669d0aa252ddb6d9e68279348719e6e49f50032a +2680800 0001839b33df9c69241dcd76ff8c6aa590500d79fd35c69ed495689125a1dd09 +2681200 002f0cf5a9fb1221298b03ec9ee707fa92112c03ad882958dd33121517ea70cb +2681600 001f78b187b73f70bc67b3b41a93bb6ad16dbab9ef93a52b14a91337c2e5d276 +2682000 00255995257e77dc5b3901d79c93da3b61e6de244fef5d7f94c9e07886761796 +2682400 00104ff976aa321a925c6a0c183774ebff19502be32ff11eaad0b366213d1c74 +2682800 002a4df36f401d2864cde3d1f846da38d585cf1190f643801d41d922076d73a4 +2683200 0038c2ff3dcf17e19f8f5fc94f4c00f32a3647468e4756e8e8af622f4d8d7cf6 +2683600 0012d47c0ece2cb94cc3ddf11e6dc53a7f2f3c24b10e0ea50adad72b64d76730 +2684000 0010f2957fc53a68f2f79c0355f895a42f03fb7ca1fcc0140dd9521cc5a01e1f +2684400 00374943e133e5ee75c608633e13583bbfdcc1c16fa5b117108214171c591985 +2684800 001def30235bd75ba64fe40880b1fa75efd524cb13a820c711b46622ed60ee2c +2685200 0004a066b0bb6fc1f7374e821afff34c5fe63b3c622e1ba6a123f8f5777eec46 +2685600 0033a4b6856745edba75f935003f0890fae4e82239d334b786beea1392c03bcc +2686000 0036be328da5acc624574d5fed52835e79946d0e75ddd9857b50ff21e0795dd5 +2686400 00e00c063c8e8e4a0b5f0cc3775eaae5f41b881c0892e21ea1b144b9d3c5181f +2686800 0042b36abc546ae02fdd4f83c64c945a25dbc0e709cab17a997c4b53a41bc8d9 +2687200 001f6cbb2e309c474e4a0ce39c9cf469280e23451f0d7222966e86da5619ce20 +2687600 000cc73fea0ec3792117f0bb2b5ccac6fd48d94b3598abcabd932b650495565d +2688000 00355d7303356bb8fc39c3e4cc9b6d6771a4f26a9039f96b3e060526f928b89b +2688400 000e2979ff4866a3495c9fff63c96772cbfe58dc3372524972fb64ca538c4c21 +2688800 001d902c929fc41350856ff3150956d2344d4caf4c7df0a2e55519eae39a09c5 +2689200 002a41b30826bf852137fa50a0a6e18f87c8f44a58e5026f9ce47361bd3ebf5a +2689600 00666b62d4d1de42519a3b8c5915afda86010c20f659004f63fc429bbadd6d12 +2690000 004ff8728b358d33e6c9991a69e4357e0ea7a83e541c7c3ec86b3a753b9b4df0 +2690400 002b568b29b2d8827be359661fd3dbf58ba685e1fc31dff874330cd054c718a5 +2690800 000d69c76ca9e461a751be0610b5d49373be89b7b160709c677d0e2acf5215ee +2691200 0047a73ad2a733005c22bb452517635914a99101c49ceb0e0451272abf60079e +2691600 0022f2c01a555b014793d86ccdc5aaa3b73f9085d16030387133f87710003af3 +2692000 0042d3bf224cdf7ce4c61d614af25cd1e15468c1c53a1f40f51da7e753d9ca1f +2692400 001fb07850677fb053b69469c97f2dc519302dd16ecaa13d59cc7e4b1382fe48 +2692800 000f915d0480894fc24a2af1043ecab2827f82119082421533038d6336af0960 +2693200 0030c3ec5272847f9edeb258739b20349505b7985cb1822f5774fe44f6d94e76 +2693600 0025a091dda76d337d215420332531e0258579e771c6b26421ed7ae9bf905dc2 +2694000 001575c9457d110c90dcf1e47a5d6634d87080735c5242638cabbe4046c41c13 +2694400 0022ea65faa8d5674973cd96c1c9d065b43c90faaf8befc4aa5a0a79d388a1d4 +2694800 
000ee7f466aeb6900e3dd3ed728a90541f442daed016df927a55adc0230e2562 +2695200 0022b907adc837a5489fe0776455bd5fb5573f4c10c5d8b1253f7f0107b5d37d +2695600 0008fd91ba75ce79df834144aa649ab60c7c8c044655afd956cd333cdc26535f +2696000 002ea266008d13f2f366ba996f734d84608394d299a90ca465222e3700844086 +2696400 0005f77b044c18ccf70c637ad75e6aaad16bc386602fc7a899aef617c144b766 +2696800 002a8e307552bf652e29ebf3a479606412e5ac8d900ac184363d7c81b88ad452 +2697200 000415e99190dceb754ce23d0d59e3a6c87cd772619e8a36ab916d5482f25191 +2697600 001741a9a824e7e89c222a3edc510fa4ff5ee5af2e314ec41db0ed3844225df3 +2698000 0008fc2f48c3321f7e7d87af1d06ca51ff28d87d0c524d020f580707ad2a57c1 +2698400 002dc5d130fb8f3fa0a93dd4908dfadb4e1effa4049f1026c43de6484f8e30ef +2698800 0004ed29d09d28e2ab9252535e4a5631eb8251e9eb9843eebd35cf01fa10e57c +2699200 000e7ad461890147d175c0859723713f209a6f6701f545124d96d59de2ac2529 +2699600 001e30fa80a8d6ad35aa1d27342a2b5527bfe59f44384e412e84080fd577bd38 +2700000 000ff1283f20cf8917969eb07693eec71d4990677ff7bbdd376d079d42e8945a +2700400 001f38b2e51f2c04feec6f5cedffa5e6bb6d774ce98a9d82a9a4cfbd7d79d057 +2700800 002951816a09a593fc74b41e1530aa35c7ba9883d87784341f2328030a3f1f38 +2701200 004225f0d5d8033d297421bfb9067c15ff5074a28a2aac04c84a0867bbb6c397 +2701600 000eb6180237f7a43f6190b7f61d8d1891c7f6cb0b837d1ec21ec6729310c780 +2702000 0006ffa9a04ff358a0d0bca76449d436cc6c4ed6f4327f97d7718c4a2b1dd581 +2702400 003b1bb85faa6466a96a3db9ddbe4cb64384ca596c1c47811094a1811ec00797 +2702800 00ceb02d343f803ec0559fddd3848bea2270b092f232f73f4038ef4973b37d24 +2703200 001b765ae2922ff38f22fc18921faa424cb4e5c6afee8f5ee72f3c2e52e7d637 +2703600 001474fa31a08161abe256ac01f43f5235a61c3bee19d4324d0ac310f69ede72 +2704000 00042d3006becaf88b6540c919b69c87a075128d988d25553be59efc8244bcba +2704400 003cec9df892651625dc2fadc49d6196a7b256c9d3059b971c21d14282dc5b8e +2704800 001f5604c6ef7d9a42e0d47d36cfdcffecac65ba7729ae5be346cb59fa9cb704 +2705200 0040cf96282d53a750586d605cb21273085ff2aaa43b8bb9d2e73fed0c92c5e2 +2705600 002e3a602750aa83778f3fe7b0574db93f0231f4f68121c7c43b60b1c5ebfc12 +2706000 002e05a687a80daecaf06b2c8d6af308546807576c04543e79ad2ae6e66f9724 +2706400 001876c33fc1b08905c9f0b5dcb981e47fcba880ab8288bb16362889fcb65e7e +2706800 0010d214adac113db48f14ed45e8d34b30e294d963b6fa27b9bfcd20213f95bc +2707200 00327aeb2b983313feef38bec353c68f581a289d3bee8ed14d6211a6ea03e2b5 +2707600 0008bd0301a168fb1ea80ffee1dcbb455941332d95fe1edad31bfde6915a6f1c +2708000 001326dfeea4a77ac8e1da85d88d1b7bd9c27bd9143601c8595b1b54a278932b +2708400 0015be83fd14a7038305f860d3b6cdeb7325304df2e68ec67697962f74cd112c +2708800 001325b356ed528aea0438b1eda99a650abe0bfa64d1ea64fd6970f4b12061a2 +2709200 002764717d2469186239a2d2de8d432c739d3f1ad80c74a8cdefd46c9f3f9ef5 +2709600 00367b7c76be2dbe828ad0e4e368fb859456030deafe87a8f4ceefe8a5d1bbe5 +2710000 0071a2d9923e34b35da859d7e9cfbb72afa62c440c88eaa54fb4c2d68b28e982 +2710400 003b8dc27ce5c3d63f0d36757317ae7c742ad1daf8eadd70313e2cdcaf876c6b +2710800 0035dc404b6dcbbe2f90762ade418d3776984c7893fedbf6b1787f42d709425e +2711200 000b1c1c1ac3de711f2dce98ccd13f18f252125bac4f0e93d4dd2abffd381859 +2711600 0035e746b00e6038b1946106f8a70f0c2ae279a96233efd5220cea74f7a9fb60 +2712000 0015eb8b49d8d379cc994198c3075edee256ffc9d0be2736064b45bf3bec0e58 +2712400 00080b0330af8cb7303847c4a7e5fbcc8b1329c2e3864298b461accef291bc28 +2712800 001e09f4d73761b8144db6393954094e35497f0780ee939c4418b020dfd7570b +2713200 006387b09c888b746f360330f913aed71384cfcaf7601a78886b4f3515e7936f +2713600 00314d785e0397cae2fe12d0022367d61fe152025a230ef8a56bdc75310ce614 +2714000 
0044424991b139f0a9bc1c99082d0037162c2453cd464e112308d7d68a9ce149 +2714400 009e53df287b7c5447ceb7a38916363475c242281c917cd3950fa6195dfab449 +2714800 00369bb009cc5c625e5287b69f86da2a9456120e5c6fe64dc97e01a20422b204 +2715200 003a3f4a74b4766279e4b830ec0d4eaf1de15a7c5eb2ba3ad34ad087e9f214f5 +2715600 000d627aa474ef695edc6ef634b0ead1df15616edd9422d956692998c75c3dfc +2716000 00189465502c8f1f5ec17082756196200b4fa110cb2b0ebf46b1eef2bbe1c795 +2716400 00248445b26691a601b389eafbba16812e72314af14515f340d3a9d23535fe92 +2716800 00050d01207fddce0b9e00845fb297b7768715e2fdf6969210ee35b7dee93353 +2717200 006cdf824547983d0e838cdaedeaaf2d5d0bfc0f20643033bf57eade7b427cb5 +2717600 0025367415d69883db96ecfc27c6e47ba4cdcc2224172acfbd273b41aff16125 +2718000 0042d548fdb5e836314a31666907d69322b6e229531d60def73c2149cae9e44e +2718400 000c98aa1f9d76c6073aece4455072688fec5928e14c785076f826929ec1ce22 +2718800 001175480f7234f968f7f28cec8631ca8df4600dffbaa381934e148b296812ed +2719200 0026aa7490e0d6a9044f54e1dff91508d6cf2b300fc8c870cf8612e769602060 +2719600 003447ec8de1b45eb62007e5c6d4b87310133df337e73c43b6a2164c3178b570 +2720000 001d6d238d715c2d9ad402dc43b90e74356baee657f5f7852d1e899fb0f4c556 +2720400 003f4888a18149135c58cbb36445a770b4bcca23ba4f12d1d7a932b361801300 +2720800 0043d608126c21acc52d0c32eb5092450040952389e65e3277d264ec4c616e77 +2721200 000e88366a7114e9e619b8929c23301d1ede97ae43a0dd5d8f76190b86d12926 +2721600 0039dc58e64fbe987a9e1bf5f8bc61753463463e9ca28cd6e610fe12b2a6bd94 +2722000 0008547da2d58dc7cf7fb3b79778de079bae6027c516ed63202a8bddfcb8bd27 +2722400 0029de07fbd081f90e2ad2166e808d72b6496a8bf8b0a6d02ed204fa99945d2e +2722800 0028916e2497a362b0cc74440a612b6145ba091d2d5a6d7971afeb095ec0d8fb +2723200 0021d951f7949242503a2a048bd956db00070283494d07f064fb26ed0448a569 +2723600 003415d44ee19409f65c443959cd901a82ad92b05ef39d23719bf597ba0256ce +2724000 001fa35a5b9aa6202d98cf7c31e2aa44378caa7cf08fec2b108da0f3ec9805f8 +2724400 001062076a22dae786efbfa11648ae08e5d87b772800d4f447ee893de1bfd40b +2724800 003bbcc622058d2bd91fd194224bcbd6b1d2ae08316fed2226834f4188ce7ff5 +2725200 00231b6c5398cc2fb489066e202a7bc634eed67ee6dea84db0ae9247be789554 +2725600 00398d7b45f2f51c3ee3ada670f6d12d5921c9032696900bb5d5b1b062b70bca +2726000 0023a2cc37759a6518f7469bde81959d38e90df134240ef9b43d59d29f4fddf1 +2726400 002a79b0c18db047110409a804bd257681852650d15b28977d2dd12edfc1631e +2726800 003f3ef8cd3ac611f543a0724ba4c8888e144551dd89e47fa6385873bd49b357 +2727200 003ba226a19d5608b2cd726e31d658ffb802de92f1ec1f4117f43899602b5a17 +2727600 001ff47bc28b72302f17a3b33caca54528004fabe31cbc94df70e9764a3ad952 diff --git a/zebra-consensus/src/config.rs b/zebra-consensus/src/config.rs index 8bad913b8c6..709c73ddff2 100644 --- a/zebra-consensus/src/config.rs +++ b/zebra-consensus/src/config.rs @@ -4,8 +4,13 @@ use serde::{Deserialize, Serialize}; /// Configuration for parallel semantic verification: /// -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields, default)] +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[serde( + deny_unknown_fields, + default, + from = "InnerConfig", + into = "InnerConfig" +)] pub struct Config { /// Should Zebra make sure that it follows the consensus chain while syncing? /// This is a developer-only option. @@ -30,9 +35,40 @@ pub struct Config { /// For security reasons, this option might be deprecated or ignored in a future Zebra /// release. pub checkpoint_sync: bool, +} + +impl From<InnerConfig> for Config { + fn from( + InnerConfig { + checkpoint_sync, ..
+ }: InnerConfig, + ) -> Self { + Self { checkpoint_sync } + } +} - /// Skip the pre-download of Groth16 parameters if this option is true. - pub debug_skip_parameter_preload: bool, +impl From<Config> for InnerConfig { + fn from(Config { checkpoint_sync }: Config) -> Self { + Self { + checkpoint_sync, + _debug_skip_parameter_preload: false, + } + } +} + +/// Inner consensus configuration for backwards compatibility with older `zebrad.toml` files, +/// which contain fields that have been removed. +/// +/// Rust API callers should use [`Config`]. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct InnerConfig { + /// See [`Config`] for more details. + pub checkpoint_sync: bool, + + #[serde(skip_serializing, rename = "debug_skip_parameter_preload")] + /// Unused config field for backwards compatibility. + pub _debug_skip_parameter_preload: bool, +} // we like our default configs to be explicit @@ -42,7 +78,15 @@ impl Default for Config { fn default() -> Self { Self { checkpoint_sync: true, - debug_skip_parameter_preload: false, + } + } +} + +impl Default for InnerConfig { + fn default() -> Self { + Self { + checkpoint_sync: Config::default().checkpoint_sync, + _debug_skip_parameter_preload: false, + } + } +} diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index 28abdda4e13..253c11d4912 100644 --- a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -32,25 +32,15 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_consensus")] -// -// Rust 1.72 has a false positive when nested generics are used inside Arc. -// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code.
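The `from`/`into` serde attributes in the `config.rs` change above are what keep older `zebrad.toml` files parsing after the field removal: serde first deserializes the wire-format `InnerConfig`, then converts it into `Config`, and serialization goes the other way, with `skip_serializing` keeping the dead field out of newly written files. A minimal, self-contained sketch of the same pattern (an illustration assuming the `serde` and `toml` crates, not the actual Zebra code):

```rust
use serde::{Deserialize, Serialize};

/// Public config: the removed field no longer exists in the Rust API.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(from = "InnerConfig", into = "InnerConfig")]
pub struct Config {
    pub checkpoint_sync: bool,
}

/// Wire format: still tolerates the removed field in old config files,
/// but never writes it back out.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
struct InnerConfig {
    checkpoint_sync: bool,
    #[serde(skip_serializing, rename = "debug_skip_parameter_preload")]
    _debug_skip_parameter_preload: bool,
}

impl Default for InnerConfig {
    fn default() -> Self {
        Self {
            checkpoint_sync: true,
            _debug_skip_parameter_preload: false,
        }
    }
}

impl From<InnerConfig> for Config {
    fn from(inner: InnerConfig) -> Self {
        Self {
            checkpoint_sync: inner.checkpoint_sync,
        }
    }
}

impl From<Config> for InnerConfig {
    fn from(config: Config) -> Self {
        Self {
            checkpoint_sync: config.checkpoint_sync,
            _debug_skip_parameter_preload: false,
        }
    }
}

fn main() {
    // An old config file that still sets the removed field parses cleanly:
    let old = "checkpoint_sync = false\ndebug_skip_parameter_preload = true\n";
    let config: Config = toml::from_str(old).expect("removed field is tolerated");
    assert!(!config.checkpoint_sync);

    // The removed field is never serialized back out:
    let out = toml::to_string(&config).expect("config serializes");
    assert!(!out.contains("debug_skip_parameter_preload"));
}
```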
-// -// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release: -// -#![cfg_attr( - any(test, feature = "proptest-impl"), - allow(clippy::arc_with_non_send_sync) -)] +#![doc(html_root_url = "https://docs.rs/zebra_consensus")] mod block; mod checkpoint; -mod config; mod parameters; mod primitives; mod script; +pub mod config; pub mod error; pub mod router; pub mod transaction; diff --git a/zebra-consensus/src/primitives/ed25519.rs b/zebra-consensus/src/primitives/ed25519.rs index 7a17ac9e14a..d51113276dd 100644 --- a/zebra-consensus/src/primitives/ed25519.rs +++ b/zebra-consensus/src/primitives/ed25519.rs @@ -164,10 +164,10 @@ impl Service<BatchControl<Item>> for Verifier { if result.is_ok() { tracing::trace!(?result, "validated ed25519 signature"); - metrics::counter!("signatures.ed25519.validated", 1); + metrics::counter!("signatures.ed25519.validated").increment(1); } else { tracing::trace!(?result, "invalid ed25519 signature"); - metrics::counter!("signatures.ed25519.invalid", 1); + metrics::counter!("signatures.ed25519.invalid").increment(1); } result.map_err(BoxError::from) } diff --git a/zebra-consensus/src/primitives/ed25519/tests.rs b/zebra-consensus/src/primitives/ed25519/tests.rs index 0847ed08202..cc75e71fa36 100644 --- a/zebra-consensus/src/primitives/ed25519/tests.rs +++ b/zebra-consensus/src/primitives/ed25519/tests.rs @@ -2,70 +2,97 @@ use std::time::Duration; -use color_eyre::eyre::{eyre, Result}; -use futures::stream::{FuturesUnordered, StreamExt}; +use color_eyre::eyre::{eyre, Report, Result}; +use futures::stream::{FuturesOrdered, StreamExt}; use tower::ServiceExt; use tower_batch_control::Batch; use crate::primitives::ed25519::*; -async fn sign_and_verify<V>(mut verifier: V, n: usize) -> Result<(), V::Error> +async fn sign_and_verify<V>( + mut verifier: V, + n: usize, + bad_index: Option<usize>, +) -> Result<(), V::Error> where V: Service<Item, Response = ()>, { - let mut rng = thread_rng(); - let mut results = FuturesUnordered::new(); + let mut results = FuturesOrdered::new(); for i in 0..n { let span = tracing::trace_span!("sig", i); + let sk = SigningKey::new(thread_rng()); + let vk_bytes = VerificationKeyBytes::from(&sk); let msg = b"BatchVerifyTest"; + let sig = if Some(i) == bad_index { + sk.sign(b"badmsg") + } else { + sk.sign(&msg[..]) + }; - let sk = SigningKey::new(&mut rng); - let vk = VerificationKey::from(&sk); - let sig = sk.sign(&msg[..]); verifier.ready().await?; - results.push(span.in_scope(|| verifier.call((vk.into(), sig, msg).into()))) + results.push_back(span.in_scope(|| verifier.call((vk_bytes, sig, msg).into()))) } - while let Some(result) = results.next().await { - result?; + let mut numbered_results = results.enumerate(); + while let Some((i, result)) = numbered_results.next().await { + if Some(i) == bad_index { + assert!(result.is_err()); + } else { + result?; + } } Ok(()) } #[tokio::test(flavor = "multi_thread")] -async fn batch_flushes_on_max_items_test() -> Result<()> { - batch_flushes_on_max_items().await -} - -#[spandoc::spandoc] -async fn batch_flushes_on_max_items() -> Result<()> { +async fn batch_flushes_on_max_items() -> Result<(), Report> { use tokio::time::timeout; + let _init_guard = zebra_test::init(); // Use a very long max_latency and a short timeout to check that // flushing is happening based on hitting max_items. + // + // Create our own verifier, so we don't shut down a shared verifier used by other tests.
let verifier = Batch::new(Verifier::default(), 10, 5, Duration::from_secs(1000)); - timeout(Duration::from_secs(5), sign_and_verify(verifier, 100)) - .await? + timeout(Duration::from_secs(5), sign_and_verify(verifier, 100, None)) + .await + .map_err(|e| eyre!(e))? .map_err(|e| eyre!(e))?; Ok(()) } #[tokio::test(flavor = "multi_thread")] -async fn batch_flushes_on_max_latency_test() -> Result<()> { - batch_flushes_on_max_latency().await -} - -#[spandoc::spandoc] -async fn batch_flushes_on_max_latency() -> Result<()> { +async fn batch_flushes_on_max_latency() -> Result<(), Report> { use tokio::time::timeout; + let _init_guard = zebra_test::init(); // Use a very high max_items and a short timeout to check that // flushing is happening based on hitting max_latency. + // + // Create our own verifier, so we don't shut down a shared verifier used by other tests. let verifier = Batch::new(Verifier::default(), 100, 10, Duration::from_millis(500)); - timeout(Duration::from_secs(5), sign_and_verify(verifier, 10)) - .await? + timeout(Duration::from_secs(5), sign_and_verify(verifier, 10, None)) + .await + .map_err(|e| eyre!(e))? + .map_err(|e| eyre!(e))?; + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn fallback_verification() -> Result<(), Report> { + let _init_guard = zebra_test::init(); + + // Create our own verifier, so we don't shut down a shared verifier used by other tests. + let verifier = Fallback::new( + Batch::new(Verifier::default(), 10, 1, Duration::from_millis(100)), + tower::service_fn(|item: Item| async move { item.verify_single() }), + ); + + sign_and_verify(verifier, 100, Some(39)) + .await .map_err(|e| eyre!(e))?; Ok(()) diff --git a/zebra-consensus/src/primitives/groth16.rs b/zebra-consensus/src/primitives/groth16.rs index e6d7ad17a35..e2ec64144fb 100644 --- a/zebra-consensus/src/primitives/groth16.rs +++ b/zebra-consensus/src/primitives/groth16.rs @@ -485,10 +485,10 @@ impl Service<BatchControl<Item>> for Verifier { if result.is_ok() { tracing::trace!(?result, "verified groth16 proof"); - metrics::counter!("proofs.groth16.verified", 1); + metrics::counter!("proofs.groth16.verified").increment(1); } else { tracing::trace!(?result, "invalid groth16 proof"); - metrics::counter!("proofs.groth16.invalid", 1); + metrics::counter!("proofs.groth16.invalid").increment(1); } result.map_err(BoxError::from) diff --git a/zebra-consensus/src/primitives/halo2.rs b/zebra-consensus/src/primitives/halo2.rs index e9cbc4262e6..ffc58a5feb8 100644 --- a/zebra-consensus/src/primitives/halo2.rs +++ b/zebra-consensus/src/primitives/halo2.rs @@ -348,10 +348,10 @@ impl Service<BatchControl<Item>> for Verifier { if result.is_ok() { tracing::trace!(?result, "verified halo2 proof"); - metrics::counter!("proofs.halo2.verified", 1); + metrics::counter!("proofs.halo2.verified").increment(1); } else { tracing::trace!(?result, "invalid halo2 proof"); - metrics::counter!("proofs.halo2.invalid", 1); + metrics::counter!("proofs.halo2.invalid").increment(1); } result.map_err(BoxError::from) diff --git a/zebra-consensus/src/primitives/halo2/tests.rs b/zebra-consensus/src/primitives/halo2/tests.rs index fe039fae863..2c2da9c2a0f 100644 --- a/zebra-consensus/src/primitives/halo2/tests.rs +++ b/zebra-consensus/src/primitives/halo2/tests.rs @@ -75,7 +75,7 @@ fn generate_test_vectors() { let action = zebra_chain::orchard::Action { cv: a.cv_net().to_bytes().try_into().unwrap(), nullifier: a.nullifier().to_bytes().try_into().unwrap(), - rk: <[u8; 32]>::from(a.rk()).try_into().unwrap(), + rk: <[u8; 32]>::from(a.rk()).into(), cm_x:
pallas::Base::from_repr(a.cmx().into()).unwrap(), ephemeral_key: a.encrypted_note().epk_bytes.try_into().unwrap(), enc_ciphertext: a.encrypted_note().enc_ciphertext.into(), @@ -89,9 +89,7 @@ fn generate_test_vectors() { .collect::<Vec<_>>() .try_into() .unwrap(), - binding_sig: <[u8; 64]>::from(bundle.authorization().binding_signature()) - .try_into() - .unwrap(), + binding_sig: <[u8; 64]>::from(bundle.authorization().binding_signature()).into(), } }) .collect(); diff --git a/zebra-consensus/src/primitives/redjubjub.rs b/zebra-consensus/src/primitives/redjubjub.rs index 94be0cdb5f8..31641a9f722 100644 --- a/zebra-consensus/src/primitives/redjubjub.rs +++ b/zebra-consensus/src/primitives/redjubjub.rs @@ -165,10 +165,10 @@ impl Service<BatchControl<Item>> for Verifier { if result.is_ok() { tracing::trace!(?result, "validated redjubjub signature"); - metrics::counter!("signatures.redjubjub.validated", 1); + metrics::counter!("signatures.redjubjub.validated").increment(1); } else { tracing::trace!(?result, "invalid redjubjub signature"); - metrics::counter!("signatures.redjubjub.invalid", 1); + metrics::counter!("signatures.redjubjub.invalid").increment(1); } result.map_err(BoxError::from) diff --git a/zebra-consensus/src/primitives/redpallas.rs b/zebra-consensus/src/primitives/redpallas.rs index 5064fa817fb..d848682aaa4 100644 --- a/zebra-consensus/src/primitives/redpallas.rs +++ b/zebra-consensus/src/primitives/redpallas.rs @@ -164,10 +164,10 @@ impl Service<BatchControl<Item>> for Verifier { if result.is_ok() { tracing::trace!(?result, "validated redpallas signature"); - metrics::counter!("signatures.redpallas.validated", 1); + metrics::counter!("signatures.redpallas.validated").increment(1); } else { tracing::trace!(?result, "invalid redpallas signature"); - metrics::counter!("signatures.redpallas.invalid", 1); + metrics::counter!("signatures.redpallas.invalid").increment(1); } result.map_err(BoxError::from) diff --git a/zebra-consensus/src/router.rs b/zebra-consensus/src/router.rs index 28fac00c03f..5c10e545b52 100644 --- a/zebra-consensus/src/router.rs +++ b/zebra-consensus/src/router.rs @@ -192,12 +192,10 @@ where } } -/// Initialize block and transaction verification services, -/// and pre-download Groth16 parameters if requested by the `debug_skip_parameter_preload` -/// config parameter and if the download is not already started. +/// Initialize block and transaction verification services. /// /// Returns a block verifier, transaction verifier, -/// the Groth16 parameter download task [`JoinHandle`], +/// a [`BackgroundTaskHandles`] with the state checkpoint verify task, /// and the maximum configured checkpoint verification height. /// /// The consensus configuration is specified by `config`, and the Zcash network @@ -210,12 +208,6 @@ where /// The transaction verification service asynchronously performs semantic verification /// checks. Transactions that pass semantic verification return an `Ok` result to the caller. /// -/// Pre-downloads the Sapling and Sprout Groth16 parameters if needed, -/// checks they were downloaded correctly, and loads them into Zebra. -/// (The transaction verifier automatically downloads the parameters on first use. -/// But the parameter downloads can take around 10 minutes. -/// So we pre-download the parameters, to avoid verification timeouts.) -/// /// This function should only be called once for a particular state service. /// /// Dropped requests are cancelled on a best-effort basis, but may continue to be processed.
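Before the next `router.rs` hunk, a note on the repeated `metrics::counter!` edits in the verifier services above: they follow the newer `metrics` crate macro API, where the macro returns a counter handle instead of taking the increment as a second argument. A rough before/after sketch (assuming `metrics` 0.22 or later; the counter name is copied from the diff):

```rust
fn record_validated_signature() {
    // Old API (metrics 0.21 and earlier): the value was a macro argument.
    //     metrics::counter!("signatures.ed25519.validated", 1);

    // New API (metrics 0.22+): the macro returns a handle to increment.
    metrics::counter!("signatures.ed25519.validated").increment(1);
}
```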
@@ -230,7 +222,6 @@ pub async fn init<S>( config: Config, network: Network, mut state_service: S, - debug_skip_parameter_preload: bool, ) -> ( Buffer<BoxService<Request, block::Hash, RouterError>, Request>, Buffer< @@ -244,24 +235,9 @@ where S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, S::Future: Send + 'static, { - // Give other tasks priority before spawning the download and checkpoint tasks. + // Give other tasks priority before spawning the checkpoint task. tokio::task::yield_now().await; - // Pre-download Groth16 parameters in a separate thread. - - // The parameter download thread must be launched before initializing any verifiers. - // Otherwise, the download might happen on the startup thread. - let span = Span::current(); - let groth16_download_handle = tokio::task::spawn_blocking(move || { - span.in_scope(|| { - if !debug_skip_parameter_preload { - // The lazy static initializer does the download, if needed, - // and the file hash checks. - lazy_static::initialize(&crate::groth16::GROTH16_PARAMETERS); - } - }) - }); - // Make sure the state contains the known best chain checkpoints, in a separate thread. let checkpoint_state_service = state_service.clone(); @@ -288,6 +264,7 @@ where // // let full_checkpoints = CheckpointList::new(network); + let mut already_warned = false; for (height, checkpoint_hash) in full_checkpoints.iter() { let checkpoint_state_service = checkpoint_state_service.clone(); @@ -322,20 +299,14 @@ where unreachable!("unexpected response type: {response:?} from state request") } Err(e) => { - #[cfg(not(test))] - tracing::warn!( - "unexpected error: {e:?} in state request while verifying previous \ - state checkpoints. Is Zebra shutting down?" - ); - // This error happens a lot in some tests. - // - // TODO: fix the tests so they don't cause this error, - // or change the tracing filter - #[cfg(test)] - tracing::debug!( - "unexpected error: {e:?} in state request while verifying previous \ - state checkpoints. Is Zebra shutting down?" - ); + // This error happens a lot in some tests, and it could happen to users. + if !already_warned { + tracing::warn!( + "unexpected error: {e:?} in state request while verifying previous \ + state checkpoints. Is Zebra shutting down?" + ); + already_warned = true; + } } } } @@ -381,7 +352,6 @@ where let router = Buffer::new(BoxService::new(router), VERIFIER_BUFFER_BOUND); let task_handles = BackgroundTaskHandles { - groth16_download_handle, state_checkpoint_verify_handle, }; @@ -408,10 +378,6 @@ pub fn init_checkpoint_list(config: Config, network: Network) -> (CheckpointList /// The background task handles for `zebra-consensus` verifier initialization. #[derive(Debug)] pub struct BackgroundTaskHandles { - /// A handle to the Groth16 parameter download task. - /// Finishes when the parameters are downloaded and their checksums verified. - pub groth16_download_handle: JoinHandle<()>, - /// A handle to the state checkpoint verify task. /// Finishes when all the checkpoints are verified, or when the state tip is reached.
pub state_checkpoint_verify_handle: JoinHandle<()>, diff --git a/zebra-consensus/src/router/tests.rs b/zebra-consensus/src/router/tests.rs index eb2abf1b2a3..c438a79f8f2 100644 --- a/zebra-consensus/src/router/tests.rs +++ b/zebra-consensus/src/router/tests.rs @@ -71,7 +71,7 @@ async fn verifiers_from_network( _transaction_verifier, _groth16_download_handle, _max_checkpoint_height, - ) = crate::router::init(Config::default(), network, state_service.clone(), true).await; + ) = crate::router::init(Config::default(), network, state_service.clone()).await; // We can drop the download task handle here, because: // - if the download task fails, the tests will panic, and @@ -144,12 +144,10 @@ static STATE_VERIFY_TRANSCRIPT_GENESIS: Lazy< async fn verify_checkpoint_test() -> Result<(), Report> { verify_checkpoint(Config { checkpoint_sync: true, - debug_skip_parameter_preload: true, }) .await?; verify_checkpoint(Config { checkpoint_sync: false, - debug_skip_parameter_preload: true, }) .await?; @@ -174,7 +172,7 @@ async fn verify_checkpoint(config: Config) -> Result<(), Report> { _transaction_verifier, _groth16_download_handle, _max_checkpoint_height, - ) = super::init(config.clone(), network, zs::init_test(network), true).await; + ) = super::init(config.clone(), network, zs::init_test(network)).await; // Add a timeout layer let block_verifier_router = diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 63d720c9d0b..9b0e6aaaee4 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -2791,7 +2791,7 @@ fn coinbase_outputs_are_decryptable_for_historical_blocks_for_network( .unwrap(); let coinbase_tx = block .transactions - .get(0) + .first() .expect("must have coinbase transaction"); // Check if the coinbase outputs are decryptable with an all-zero key. 
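With the Groth16 pre-download removed, `router::init` now takes only a config, a network, and a state service, and the only background task left in `BackgroundTaskHandles` is the state checkpoint verifier. A hedged sketch of the new call shape, mirroring the updated call sites in `router/tests.rs` above (the binding names are illustrative, and the surrounding async context and `state_service` are assumed):

```rust
// Sketch only: follows the updated test call sites above.
let (block_verifier_router, transaction_verifier, task_handles, max_checkpoint_height) =
    zebra_consensus::router::init(Config::default(), network, state_service.clone()).await;

// `task_handles` now only holds the state checkpoint verify task;
// tests can simply drop it, long-running callers can await or monitor it.
let _checkpoint_task = task_handles.state_checkpoint_verify_handle;
```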
diff --git a/zebra-grpc/Cargo.toml b/zebra-grpc/Cargo.toml new file mode 100644 index 00000000000..5d363deae80 --- /dev/null +++ b/zebra-grpc/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "zebra-grpc" +version = "0.1.0-alpha.2" +authors = ["Zcash Foundation <zebra@zfnd.org>"] +description = "Zebra gRPC interface" +license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" +edition = "2021" + +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of <https://crates.io/category_slugs> +categories = ["cryptography::cryptocurrencies"] + +[dependencies] + +futures-util = "0.3.28" +tonic = "0.11.0" +tonic-reflection = "0.11.0" +prost = "0.12.3" +serde = { version = "1.0.196", features = ["serde_derive"] } +tokio = { version = "1.36.0", features = ["macros", "rt-multi-thread"] } +tokio-stream = "0.1.14" +tower = { version = "0.4.13", features = ["util", "buffer"] } +color-eyre = "0.6.2" + +zcash_primitives = { version = "0.13.0-rc.1" } + +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.35", features = ["shielded-scan"] } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.35" } + +[build-dependencies] +tonic-build = "0.11.0" + +[dev-dependencies] +insta = { version = "1.33.0", features = ["redactions", "json", "ron"] } + +zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state" } +zebra-test = { path = "../zebra-test" } + diff --git a/zebra-grpc/build.rs b/zebra-grpc/build.rs new file mode 100644 index 00000000000..31669063262 --- /dev/null +++ b/zebra-grpc/build.rs @@ -0,0 +1,16 @@ +//! Compile proto files + +use std::{env, path::PathBuf}; + +fn main() -> Result<(), Box<dyn std::error::Error>> { + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + + tonic_build::configure() + .btree_map(["."]) + .protoc_arg("--experimental_allow_proto3_optional") + .type_attribute(".", "#[derive(serde::Deserialize, serde::Serialize)]") + .file_descriptor_set_path(out_dir.join("scanner_descriptor.bin")) + .compile(&["proto/scanner.proto"], &[""])?; + + Ok(()) +} diff --git a/zebra-grpc/proto/scanner.proto b/zebra-grpc/proto/scanner.proto new file mode 100644 index 00000000000..ed68295a446 --- /dev/null +++ b/zebra-grpc/proto/scanner.proto @@ -0,0 +1,108 @@ +syntax = "proto3"; +package scanner; + +// Empty is for gRPCs that take no arguments, currently only GetInfo. +message Empty {} + +service Scanner { + // Get information about the scanner service. + rpc GetInfo (Empty) returns (InfoReply); + + // Clear results for a set of keys without removing the keys from the scanner. + // This request does not stop the scanner from scanning blocks for these keys, it + // only clears past results. + rpc ClearResults(ClearResultsRequest) returns (Empty); + + // Deletes a set of keys and their results from the scanner. + // This request stops the scanner from scanning blocks for these keys. + rpc DeleteKeys(DeleteKeysRequest) returns (Empty); + + // Get all data we have stored for the given keys. + rpc GetResults(GetResultsRequest) returns (GetResultsResponse); + + // Submits scanning keys to the scanner. + rpc RegisterKeys(RegisterKeysRequest) returns (RegisterKeysResponse); + + // Register keys and listen to the results + rpc Scan (ScanRequest) returns (stream ScanResponse); +} + +// A response to a GetInfo call. +message InfoReply { + // The minimum sapling height allowed.
+ uint32 min_sapling_birthday_height = 1; +} + +// A request for clearing past results from the scanner cache. +message ClearResultsRequest { + // Keys for which to clear results. + repeated string keys = 1; +} + +// A request to delete keys, delete their results, and stop scanning for their results. +message DeleteKeysRequest { + // Keys to delete from scanner. + repeated string keys = 1; +} + +// A request for getting results for a set of keys. +message GetResultsRequest { + // Keys for which to get results. + repeated string keys = 1; +} + +// A request to register scanning keys +message RegisterKeysRequest { + // Keys to register + repeated KeyWithHeight keys = 1; +} + +// A set of responses for each provided key of a GetResults call. +message GetResultsResponse { + // Results for each key. + map<string, Results> results = 1; +} + +// A response to `RegisterKeysRequest` containing registered keys +message RegisterKeysResponse { + // Keys that were registered + repeated string keys = 1; +} + +// A result for a single key. +message Results { + // A height, transaction id map + map<uint32, Transactions> by_height = 1; +} + +// A vector of transaction hashes +message Transactions { + // Transactions + repeated Transaction transactions = 1; +} + +// Transaction data +message Transaction { + // The transaction hash/id + string hash = 1; +} + +// A scanning key with an optional birth height +message KeyWithHeight { + // Scanning key + string key = 1; + // Birth height of the key + optional uint32 height = 2; +} + +// A request for registering keys and getting their transactions +message ScanRequest { + // A set of viewing keys + repeated KeyWithHeight keys = 2; +} + +// Response to Scan calls +message ScanResponse { + // Results for each key. + map<string, Results> results = 1; +} \ No newline at end of file diff --git a/zebra-grpc/src/lib.rs b/zebra-grpc/src/lib.rs new file mode 100644 index 00000000000..47fa8cc706d --- /dev/null +++ b/zebra-grpc/src/lib.rs @@ -0,0 +1,18 @@ +//! Zebra gRPC interface. + +#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] +#![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] +#![doc(html_root_url = "https://docs.rs/zebra_grpc")] + +pub mod server; + +#[cfg(test)] +mod tests; + +/// The generated scanner proto +pub mod scanner { + tonic::include_proto!("scanner"); + + pub(crate) const FILE_DESCRIPTOR_SET: &[u8] = + tonic::include_file_descriptor_set!("scanner_descriptor"); +} diff --git a/zebra-grpc/src/server.rs b/zebra-grpc/src/server.rs new file mode 100644 index 00000000000..383a71fdc5b --- /dev/null +++ b/zebra-grpc/src/server.rs @@ -0,0 +1,432 @@ +//! The gRPC server implementation + +use std::{collections::BTreeMap, net::SocketAddr, pin::Pin}; + +use futures_util::future::TryFutureExt; +use tokio_stream::{wrappers::ReceiverStream, Stream}; +use tonic::{transport::Server, Request, Response, Status}; +use tower::ServiceExt; + +use zebra_chain::{block::Height, transaction}; +use zebra_node_services::scan_service::{ + request::Request as ScanServiceRequest, + response::{Response as ScanServiceResponse, ScanResult}, +}; + +use crate::scanner::{ + scanner_server::{Scanner, ScannerServer}, + ClearResultsRequest, DeleteKeysRequest, Empty, GetResultsRequest, GetResultsResponse, + InfoReply, KeyWithHeight, RegisterKeysRequest, RegisterKeysResponse, Results, ScanRequest, + ScanResponse, Transaction, Transactions, +}; + +type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>; + +/// The maximum number of keys that can be requested in a single request.
+pub const MAX_KEYS_PER_REQUEST: usize = 10; + +/// The maximum number of messages that can be queued to be streamed to a client +/// from the `scan` method. +const SCAN_RESPONDER_BUFFER_SIZE: usize = 10_000; + +#[derive(Debug)] +/// The server implementation +pub struct ScannerRPC<ScanService> +where + ScanService: tower::Service<ScanServiceRequest, Response = ScanServiceResponse, Error = BoxError> + + Clone + + Send + + Sync + + 'static, + <ScanService as tower::Service<ScanServiceRequest>>::Future: Send, +{ + scan_service: ScanService, +} + +#[tonic::async_trait] +impl<ScanService> Scanner for ScannerRPC<ScanService> +where + ScanService: tower::Service<ScanServiceRequest, Response = ScanServiceResponse, Error = BoxError> + + Clone + + Send + + Sync + + 'static, + <ScanService as tower::Service<ScanServiceRequest>>::Future: Send, +{ + type ScanStream = Pin<Box<dyn Stream<Item = Result<ScanResponse, Status>> + Send>>; + + async fn scan( + &self, + request: tonic::Request<ScanRequest>, + ) -> Result<Response<Self::ScanStream>, Status> { + let keys = request.into_inner().keys; + + if keys.is_empty() { + let msg = "must provide at least 1 key in scan request"; + return Err(Status::invalid_argument(msg)); + } + + let keys: Vec<_> = keys + .into_iter() + .map(|KeyWithHeight { key, height }| (key, height)) + .collect(); + let register_keys_response_fut = self + .scan_service + .clone() + .oneshot(ScanServiceRequest::RegisterKeys(keys.clone())); + + let keys: Vec<_> = keys.into_iter().map(|(key, _start_at)| key).collect(); + + let subscribe_results_response_fut = + self.scan_service + .clone() + .oneshot(ScanServiceRequest::SubscribeResults( + keys.iter().cloned().collect(), + )); + + let (register_keys_response, subscribe_results_response) = + tokio::join!(register_keys_response_fut, subscribe_results_response_fut); + + let ScanServiceResponse::RegisteredKeys(_) = register_keys_response + .map_err(|err| Status::unknown(format!("scan service returned error: {err}")))? + else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + let ScanServiceResponse::SubscribeResults(mut results_receiver) = + subscribe_results_response + .map_err(|err| Status::unknown(format!("scan service returned error: {err}")))? + else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + let ScanServiceResponse::Results(results) = self + .scan_service + .clone() + .ready() + .and_then(|service| service.call(ScanServiceRequest::Results(keys.clone()))) + .await + .map_err(|err| Status::unknown(format!("scan service returned error: {err}")))? + else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + let (response_sender, response_receiver) = + tokio::sync::mpsc::channel(SCAN_RESPONDER_BUFFER_SIZE); + let response_stream = ReceiverStream::new(response_receiver); + + tokio::spawn(async move { + let mut initial_results = process_results(keys, results); + + // Empty results receiver channel to filter out duplicate results between the channel and cache + while let Ok(ScanResult { key, height, tx_id }) = results_receiver.try_recv() { + let entry = initial_results + .entry(key) + .or_default() + .by_height + .entry(height.0) + .or_default(); + + let tx_id = Transaction { + hash: tx_id.to_string(), + }; + + // Add the scan result to the initial results if it's not already present.
+ if !entry.transactions.contains(&tx_id) { + entry.transactions.push(tx_id); + } + } + + let send_result = response_sender + .send(Ok(ScanResponse { + results: initial_results, + })) + .await; + + if send_result.is_err() { + // return early if the client has disconnected + return; + } + + while let Some(scan_result) = results_receiver.recv().await { + let send_result = response_sender.send(Ok(scan_result.into())).await; + + // Finish task if the client has disconnected + if send_result.is_err() { + break; + } + } + }); + + Ok(Response::new(Box::pin(response_stream))) + } + + async fn get_info( + &self, + _request: tonic::Request<Empty>, + ) -> Result<Response<InfoReply>, Status> { + let ScanServiceResponse::Info { + min_sapling_birthday_height, + } = self + .scan_service + .clone() + .ready() + .and_then(|service| service.call(ScanServiceRequest::Info)) + .await + .map_err(|_| Status::unknown("scan service was unavailable"))? + else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + let reply = InfoReply { + min_sapling_birthday_height: min_sapling_birthday_height.0, + }; + + Ok(Response::new(reply)) + } + + async fn register_keys( + &self, + request: Request<RegisterKeysRequest>, + ) -> Result<Response<RegisterKeysResponse>, Status> { + let keys: Vec<_> = request + .into_inner() + .keys + .into_iter() + .map(|key_with_height| (key_with_height.key, key_with_height.height)) + .collect(); + + if keys.is_empty() { + let msg = "must provide at least 1 key for which to register keys"; + return Err(Status::invalid_argument(msg)); + } + + if keys.len() > MAX_KEYS_PER_REQUEST { + let msg = format!( + "must provide at most {} keys to register keys", + MAX_KEYS_PER_REQUEST + ); + return Err(Status::invalid_argument(msg)); + } + + let ScanServiceResponse::RegisteredKeys(keys) = self + .scan_service + .clone() + .ready() + .and_then(|service| service.call(ScanServiceRequest::RegisterKeys(keys))) + .await + .map_err(|_| Status::unknown("scan service was unavailable"))? + else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + Ok(Response::new(RegisterKeysResponse { keys })) + } + + async fn clear_results( + &self, + request: Request<ClearResultsRequest>, + ) -> Result<Response<Empty>, Status> { + let keys = request.into_inner().keys; + + if keys.is_empty() { + let msg = "must provide at least 1 key for which to clear results"; + return Err(Status::invalid_argument(msg)); + } + + if keys.len() > MAX_KEYS_PER_REQUEST { + let msg = format!( + "must provide at most {} keys to clear results", + MAX_KEYS_PER_REQUEST + ); + return Err(Status::invalid_argument(msg)); + } + + let ScanServiceResponse::ClearedResults = self + .scan_service + .clone() + .ready() + .and_then(|service| service.call(ScanServiceRequest::ClearResults(keys))) + .await + .map_err(|err| Status::unknown(format!("scan service returned error: {err}")))?
+ else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + Ok(Response::new(Empty {})) + } + + async fn delete_keys( + &self, + request: Request<DeleteKeysRequest>, + ) -> Result<Response<Empty>, Status> { + let keys = request.into_inner().keys; + + if keys.is_empty() { + let msg = "must provide at least 1 key to delete"; + return Err(Status::invalid_argument(msg)); + } + + if keys.len() > MAX_KEYS_PER_REQUEST { + let msg = format!( + "must provide at most {} keys to delete", + MAX_KEYS_PER_REQUEST + ); + return Err(Status::invalid_argument(msg)); + } + + let ScanServiceResponse::DeletedKeys = self + .scan_service + .clone() + .ready() + .and_then(|service| service.call(ScanServiceRequest::DeleteKeys(keys))) + .await + .map_err(|err| Status::unknown(format!("scan service returned error: {err}")))? + else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + Ok(Response::new(Empty {})) + } + + async fn get_results( + &self, + request: Request<GetResultsRequest>, + ) -> Result<Response<GetResultsResponse>, Status> { + let keys = request.into_inner().keys; + + if keys.is_empty() { + let msg = "must provide at least 1 key to get results"; + return Err(Status::invalid_argument(msg)); + } + + if keys.len() > MAX_KEYS_PER_REQUEST { + let msg = format!( + "must provide at most {} keys to get results", + MAX_KEYS_PER_REQUEST + ); + return Err(Status::invalid_argument(msg)); + } + + let ScanServiceResponse::Results(response) = self + .scan_service + .clone() + .ready() + .and_then(|service| service.call(ScanServiceRequest::Results(keys.clone()))) + .await + .map_err(|err| Status::unknown(format!("scan service returned error: {err}")))? + else { + return Err(Status::unknown( + "scan service returned an unexpected response", + )); + }; + + let results = process_results(keys, response); + + Ok(Response::new(GetResultsResponse { results })) + } +} + +fn process_results( + keys: Vec<String>, + results: BTreeMap<String, BTreeMap<Height, Vec<transaction::Hash>>>, +) -> BTreeMap<String, Results> { + // If there are no results for a key, we still want to return it with empty results. + let empty_map = BTreeMap::new(); + + keys.into_iter() + .map(|key| { + let values = results.get(&key).unwrap_or(&empty_map); + + // Skip heights with no transactions, they are scanner markers and should not be returned.
+
+fn process_results(
+    keys: Vec<String>,
+    results: BTreeMap<String, BTreeMap<Height, Vec<transaction::Hash>>>,
+) -> BTreeMap<String, Results> {
+    // If there are no results for a key, we still want to return it with empty results.
+    let empty_map = BTreeMap::new();
+
+    keys.into_iter()
+        .map(|key| {
+            let values = results.get(&key).unwrap_or(&empty_map);
+
+            // Skip heights with no transactions, they are scanner markers and should not be returned.
+            let transactions = Results {
+                by_height: values
+                    .iter()
+                    .filter(|(_, transactions)| !transactions.is_empty())
+                    .map(|(height, transactions)| {
+                        let transactions = transactions
+                            .iter()
+                            .map(ToString::to_string)
+                            .map(|hash| Transaction { hash })
+                            .collect();
+                        (height.0, Transactions { transactions })
+                    })
+                    .collect(),
+            };
+
+            (key, transactions)
+        })
+        .collect::<BTreeMap<_, _>>()
+}
+
+impl From<ScanResult> for ScanResponse {
+    fn from(
+        ScanResult {
+            key,
+            height: Height(height),
+            tx_id,
+        }: ScanResult,
+    ) -> Self {
+        ScanResponse {
+            results: [(
+                key,
+                Results {
+                    by_height: [(
+                        height,
+                        Transactions {
+                            transactions: [tx_id.to_string()]
+                                .map(|hash| Transaction { hash })
+                                .to_vec(),
+                        },
+                    )]
+                    .into_iter()
+                    .collect(),
+                },
+            )]
+            .into_iter()
+            .collect(),
+        }
+    }
+}
+
+/// Initializes the zebra-scan gRPC server
+pub async fn init<ScanService>(
+    listen_addr: SocketAddr,
+    scan_service: ScanService,
+) -> Result<(), color_eyre::Report>
+where
+    ScanService: tower::Service<ScanServiceRequest, Response = ScanServiceResponse, Error = BoxError>
+        + Clone
+        + Send
+        + Sync
+        + 'static,
+    <ScanService as tower::Service<ScanServiceRequest>>::Future: Send,
+{
+    let service = ScannerRPC { scan_service };
+    let reflection_service = tonic_reflection::server::Builder::configure()
+        .register_encoded_file_descriptor_set(crate::scanner::FILE_DESCRIPTOR_SET)
+        .build()
+        .unwrap();
+
+    Server::builder()
+        .add_service(reflection_service)
+        .add_service(ScannerServer::new(service))
+        .serve(listen_addr)
+        .await?;
+
+    Ok(())
+}
diff --git a/zebra-grpc/src/tests.rs b/zebra-grpc/src/tests.rs
new file mode 100644
index 00000000000..2d6aa53242c
--- /dev/null
+++ b/zebra-grpc/src/tests.rs
@@ -0,0 +1,2 @@
+mod snapshot;
+mod vectors;
diff --git a/zebra-grpc/src/tests/snapshot.rs b/zebra-grpc/src/tests/snapshot.rs
new file mode 100644
index 00000000000..37acb59fe02
--- /dev/null
+++ b/zebra-grpc/src/tests/snapshot.rs
@@ -0,0 +1,241 @@
+//! Snapshot tests for Zebra Scan gRPC responses.
+//!
+//! Currently we snapshot the `get_info` and `get_results` responses for both mainnet and testnet with a
+//! mocked scanner database. Calls that return `Empty` responses are not snapshotted in this suite.
+//!
+//! To update these snapshots, run:
+//! ```sh
+//! cargo insta test --review --delete-unreferenced-snapshots
+//! 
``` +use std::{collections::BTreeMap, thread::sleep, time::Duration}; + +use zebra_chain::{block::Height, parameters::Network, transaction}; +use zebra_test::mock_service::MockService; + +use zebra_node_services::scan_service::{ + request::Request as ScanRequest, + response::{Response as ScanResponse, ScanResult}, +}; + +use crate::{ + scanner::{ + self, scanner_client::ScannerClient, Empty, GetResultsRequest, GetResultsResponse, + InfoReply, KeyWithHeight, + }, + server::init, +}; + +/// The extended Sapling viewing key of [ZECpages](https://zecpages.com/boardinfo) +pub const ZECPAGES_SAPLING_VIEWING_KEY: &str = "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz"; + +#[tokio::test(flavor = "multi_thread")] +async fn test_grpc_response_data() { + let _init_guard = zebra_test::init(); + + tokio::join!( + test_mocked_rpc_response_data_for_network( + Network::Mainnet, + zebra_test::net::random_known_port() + ), + test_mocked_rpc_response_data_for_network( + Network::Testnet, + zebra_test::net::random_known_port() + ), + ); +} + +async fn test_mocked_rpc_response_data_for_network(network: Network, random_port: u16) { + // get a mocked scan service + let mock_scan_service = MockService::build().for_unit_tests(); + + // start the gRPC server + let listen_addr: std::net::SocketAddr = format!("127.0.0.1:{random_port}") + .parse() + .expect("hard-coded IP and u16 port should parse successfully"); + + { + let mock_scan_service = mock_scan_service.clone(); + tokio::spawn(async move { + init(listen_addr, mock_scan_service) + .await + .expect("Possible port conflict"); + }); + } + + // wait for the server to start + sleep(Duration::from_secs(1)); + + // connect to the gRPC server + let client = ScannerClient::connect(format!("http://127.0.0.1:{random_port}")) + .await + .expect("server should receive connection"); + + // insta settings + let mut settings = insta::Settings::clone_current(); + settings.set_snapshot_suffix(network.lowercase_name()); + + // snapshot the get_info grpc call + let get_info_response_fut = { + let mut client = client.clone(); + let get_info_request = tonic::Request::new(Empty {}); + tokio::spawn(async move { client.get_info(get_info_request).await }) + }; + + { + let mut mock_scan_service = mock_scan_service.clone(); + tokio::spawn(async move { + mock_scan_service + .expect_request_that(|req| matches!(req, ScanRequest::Info)) + .await + .respond(ScanResponse::Info { + min_sapling_birthday_height: network.sapling_activation_height(), + }) + }); + } + + // snapshot the get_info grpc call + + let get_info_response = get_info_response_fut + .await + .expect("tokio task should join successfully") + .expect("get_info request should succeed"); + + snapshot_rpc_getinfo(get_info_response.into_inner(), &settings); + + // snapshot the get_results grpc call + + let get_results_response_fut = { + let mut client = client.clone(); + let get_results_request = tonic::Request::new(GetResultsRequest { + keys: vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()], + }); + tokio::spawn(async move { client.get_results(get_results_request).await }) + }; + + { + let mut mock_scan_service = mock_scan_service.clone(); + tokio::spawn(async move { + let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string(); + let mut fake_results = BTreeMap::new(); + for 
fake_result_height in [Height::MIN, Height(1), Height::MAX] { + fake_results.insert( + fake_result_height, + [transaction::Hash::from([0; 32])].repeat(3), + ); + } + + let mut fake_results_response = BTreeMap::new(); + fake_results_response.insert(zec_pages_sapling_efvk, fake_results); + + mock_scan_service + .expect_request_that(|req| matches!(req, ScanRequest::Results(_))) + .await + .respond(ScanResponse::Results(fake_results_response)) + }); + } + + let get_results_response = get_results_response_fut + .await + .expect("tokio task should join successfully") + .expect("get_results request should succeed"); + + snapshot_rpc_getresults(get_results_response.into_inner(), &settings); + + // snapshot the scan grpc method + + let scan_response_fut = { + let mut client = client.clone(); + let get_results_request = tonic::Request::new(scanner::ScanRequest { + keys: vec![KeyWithHeight { + key: ZECPAGES_SAPLING_VIEWING_KEY.to_string(), + height: None, + }], + }); + tokio::spawn(async move { client.scan(get_results_request).await }) + }; + + let (fake_results_sender, fake_results_receiver) = tokio::sync::mpsc::channel(1); + + { + let mut mock_scan_service = mock_scan_service.clone(); + tokio::spawn(async move { + let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string(); + let mut fake_results = BTreeMap::new(); + for fake_result_height in [Height::MIN, Height(1), Height::MAX] { + fake_results.insert( + fake_result_height, + [transaction::Hash::from([0; 32])].repeat(3), + ); + } + + let mut fake_results_response = BTreeMap::new(); + fake_results_response.insert(zec_pages_sapling_efvk.clone(), fake_results); + + mock_scan_service + .expect_request_that(|req| matches!(req, ScanRequest::RegisterKeys(_))) + .await + .respond(ScanResponse::RegisteredKeys(vec![])); + + mock_scan_service + .expect_request_that(|req| matches!(req, ScanRequest::SubscribeResults(_))) + .await + .respond(ScanResponse::SubscribeResults(fake_results_receiver)); + + mock_scan_service + .expect_request_that(|req| matches!(req, ScanRequest::Results(_))) + .await + .respond(ScanResponse::Results(fake_results_response)); + }); + } + + let scan_response = scan_response_fut + .await + .expect("tokio task should join successfully") + .expect("get_results request should succeed"); + + let mut scan_response_stream = scan_response.into_inner(); + + let scan_response_message = scan_response_stream + .message() + .await + .expect("scan response message should be ok") + .expect("scan response message should be some"); + + snapshot_rpc_scan("cached", scan_response_message, &settings); + + fake_results_sender + .send(ScanResult { + key: ZECPAGES_SAPLING_VIEWING_KEY.to_string(), + height: Height::MIN, + tx_id: transaction::Hash::from([0; 32]), + }) + .await + .expect("should send fake result successfully"); + + let scan_response_message = scan_response_stream + .message() + .await + .expect("scan response message should be ok") + .expect("scan response message should be some"); + + snapshot_rpc_scan("subscribed", scan_response_message, &settings); +} + +/// Snapshot `getinfo` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_getinfo(info: InfoReply, settings: &insta::Settings) { + settings.bind(|| insta::assert_json_snapshot!("get_info", info)); +} + +/// Snapshot `getresults` response, using `cargo insta` and JSON serialization. 
+fn snapshot_rpc_getresults(results: GetResultsResponse, settings: &insta::Settings) { + settings.bind(|| insta::assert_json_snapshot!("get_results", results)); +} + +/// Snapshot `scan` response, using `cargo insta` and JSON serialization. +fn snapshot_rpc_scan( + variant: &'static str, + scan_response: scanner::ScanResponse, + settings: &insta::Settings, +) { + settings.bind(|| insta::assert_json_snapshot!(format!("scan_{variant}"), scan_response)); +} diff --git a/zebra-grpc/src/tests/snapshots/get_info@mainnet.snap b/zebra-grpc/src/tests/snapshots/get_info@mainnet.snap new file mode 100644 index 00000000000..ad15b2a1ae5 --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/get_info@mainnet.snap @@ -0,0 +1,7 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: info +--- +{ + "min_sapling_birthday_height": 419200 +} diff --git a/zebra-grpc/src/tests/snapshots/get_info@testnet.snap b/zebra-grpc/src/tests/snapshots/get_info@testnet.snap new file mode 100644 index 00000000000..9c39e93e8e2 --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/get_info@testnet.snap @@ -0,0 +1,7 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: info +--- +{ + "min_sapling_birthday_height": 280000 +} diff --git a/zebra-grpc/src/tests/snapshots/get_results@mainnet.snap b/zebra-grpc/src/tests/snapshots/get_results@mainnet.snap new file mode 100644 index 00000000000..26caa0e85e8 --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/get_results@mainnet.snap @@ -0,0 +1,51 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: results +--- +{ + "results": { + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": { + "by_height": { + "0": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "1": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "2147483647": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + } + } + } + } +} diff --git a/zebra-grpc/src/tests/snapshots/get_results@testnet.snap b/zebra-grpc/src/tests/snapshots/get_results@testnet.snap new file mode 100644 index 00000000000..26caa0e85e8 --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/get_results@testnet.snap @@ -0,0 +1,51 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: results +--- +{ + "results": { + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": { + "by_height": { + "0": { + "transactions": [ + { + "hash": 
"0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "1": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "2147483647": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + } + } + } + } +} diff --git a/zebra-grpc/src/tests/snapshots/scan_cached@mainnet.snap b/zebra-grpc/src/tests/snapshots/scan_cached@mainnet.snap new file mode 100644 index 00000000000..9187f5828b0 --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/scan_cached@mainnet.snap @@ -0,0 +1,51 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: scan_response +--- +{ + "results": { + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": { + "by_height": { + "0": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "1": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "2147483647": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + } + } + } + } +} diff --git a/zebra-grpc/src/tests/snapshots/scan_cached@testnet.snap b/zebra-grpc/src/tests/snapshots/scan_cached@testnet.snap new file mode 100644 index 00000000000..9187f5828b0 --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/scan_cached@testnet.snap @@ -0,0 +1,51 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: scan_response +--- +{ + "results": { + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": { + "by_height": { + "0": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "1": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": 
"0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "2147483647": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + } + } + } + } +} diff --git a/zebra-grpc/src/tests/snapshots/scan_subscribed@mainnet.snap b/zebra-grpc/src/tests/snapshots/scan_subscribed@mainnet.snap new file mode 100644 index 00000000000..bb964432dbb --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/scan_subscribed@mainnet.snap @@ -0,0 +1,19 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: scan_response +--- +{ + "results": { + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": { + "by_height": { + "0": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + } + } + } + } +} diff --git a/zebra-grpc/src/tests/snapshots/scan_subscribed@testnet.snap b/zebra-grpc/src/tests/snapshots/scan_subscribed@testnet.snap new file mode 100644 index 00000000000..bb964432dbb --- /dev/null +++ b/zebra-grpc/src/tests/snapshots/scan_subscribed@testnet.snap @@ -0,0 +1,19 @@ +--- +source: zebra-grpc/src/tests/snapshot.rs +expression: scan_response +--- +{ + "results": { + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": { + "by_height": { + "0": { + "transactions": [ + { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + } + } + } + } +} diff --git a/zebra-grpc/src/tests/vectors.rs b/zebra-grpc/src/tests/vectors.rs new file mode 100644 index 00000000000..41959a3bdd1 --- /dev/null +++ b/zebra-grpc/src/tests/vectors.rs @@ -0,0 +1,582 @@ +//! 
Unit tests for gRPC methods +use std::{collections::BTreeMap, thread::sleep, time::Duration}; + +use tonic::transport::Channel; + +use zebra_chain::{block::Height, parameters::Network, transaction}; +use zebra_test::{ + mock_service::{MockService, PanicAssertion}, + net::random_known_port, +}; + +use crate::{ + scanner::{ + scanner_client::ScannerClient, ClearResultsRequest, DeleteKeysRequest, Empty, + GetResultsRequest, GetResultsResponse, InfoReply, KeyWithHeight, RegisterKeysRequest, + RegisterKeysResponse, + }, + server::{init, MAX_KEYS_PER_REQUEST}, +}; +use zebra_node_services::scan_service::{ + request::Request as ScanRequest, response::Response as ScanResponse, +}; + +/// The extended Sapling viewing key of [ZECpages](https://zecpages.com/boardinfo) +pub const ZECPAGES_SAPLING_VIEWING_KEY: &str = "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz"; + +/// Test the gRPC methods with mocked responses +#[tokio::test(flavor = "multi_thread")] +async fn test_grpc_methods_mocked() { + let _init_guard = zebra_test::init(); + + let (client, mock_scan_service) = start_server_and_get_client(random_known_port()).await; + + test_get_results_errors(client.clone()).await; + test_register_keys_errors(client.clone()).await; + test_clear_results_errors(client.clone()).await; + test_delete_keys_errors(client.clone()).await; + + for network in Network::iter() { + test_mocked_getinfo_for_network(&client, &mock_scan_service, network).await; + test_mocked_getresults_for_network(&client, &mock_scan_service, network).await; + test_mocked_register_keys_for_network(&client, &mock_scan_service, network).await; + test_mocked_clear_results_for_network(&client, &mock_scan_service, network).await; + test_mocked_delete_keys_for_network(&client, &mock_scan_service, network).await; + } +} + +/// Test the `get_info` gRPC method with a mocked service response. 
+async fn test_mocked_getinfo_for_network(
+    client: &ScannerClient<Channel>,
+    mock_scan_service: &MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+) {
+    // create request, fake results and get response
+    let get_info_response = call_get_info(client.clone(), mock_scan_service.clone(), network).await;
+
+    // test the response
+    assert_eq!(
+        get_info_response.into_inner().min_sapling_birthday_height,
+        network.sapling_activation_height().0,
+        "get_info response min sapling height should match network sapling activation height"
+    );
+}
+
+/// Test the `get_results` gRPC method with populated and empty results
+async fn test_mocked_getresults_for_network(
+    client: &ScannerClient<Channel>,
+    mock_scan_service: &MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+) {
+    // create request, fake populated results and get response
+    let get_results_response =
+        call_get_results(client.clone(), mock_scan_service.clone(), network, false).await;
+
+    // test the response
+    let transaction_heights = get_results_response
+        .into_inner()
+        .results
+        .first_key_value()
+        .expect("should have at least 1 value")
+        .1
+        .by_height
+        .len();
+    match network {
+        Network::Mainnet => {
+            assert_eq!(
+                transaction_heights, 3,
+                "there should be 3 transaction heights"
+            );
+        }
+        Network::Testnet => {
+            assert_eq!(
+                transaction_heights, 1,
+                "there should be 1 transaction height"
+            );
+        }
+    }
+
+    // create request, fake empty results and get response
+    let get_results_response =
+        call_get_results(client.clone(), mock_scan_service.clone(), network, true).await;
+
+    // test the response
+    let is_results_empty = get_results_response
+        .into_inner()
+        .results
+        .first_key_value()
+        .expect("should have at least 1 value")
+        .1
+        .by_height
+        .is_empty();
+
+    assert!(is_results_empty, "results should be empty");
+}
+
+/// Test the `register_keys` gRPC method
+async fn test_mocked_register_keys_for_network(
+    client: &ScannerClient<Channel>,
+    mock_scan_service: &MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+) {
+    // create request, fake return value and get response
+    let register_keys_response =
+        call_register_keys(client.clone(), mock_scan_service.clone(), network).await;
+
+    // test the response
+    assert_eq!(
+        register_keys_response.into_inner().keys,
+        vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()],
+        "keys should match mocked response"
+    );
+}
+
+/// Test the `clear_results` gRPC method
+async fn test_mocked_clear_results_for_network(
+    client: &ScannerClient<Channel>,
+    mock_scan_service: &MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+) {
+    // create request, fake results and get response
+    let get_results_response =
+        call_get_results(client.clone(), mock_scan_service.clone(), network, false).await;
+
+    // test the response
+    let transaction_heights = get_results_response
+        .into_inner()
+        .results
+        .first_key_value()
+        .unwrap()
+        .1
+        .by_height
+        .len();
+    match network {
+        Network::Mainnet => {
+            assert_eq!(transaction_heights, 3);
+        }
+        Network::Testnet => {
+            assert_eq!(transaction_heights, 1);
+        }
+    }
+
+    // create request, fake results and get response
+    let clear_results_response =
+        call_clear_results(client.clone(), mock_scan_service.clone(), network).await;
+
+    // test the response
+    assert_eq!(clear_results_response.into_inner(), Empty {});
+
+    // create request, fake results and get response
+    let get_results_response =
+        call_get_results(client.clone(), mock_scan_service.clone(), network, true).await;
+
+    // test the response
+    let is_results_empty = get_results_response
+        .into_inner()
+        .results
+        .first_key_value()
+        .unwrap()
+        .1
+        .by_height
+        .is_empty();
+
+    assert!(is_results_empty, "results should be empty");
+}
+
+/// Test the `delete_keys` gRPC method
+async fn test_mocked_delete_keys_for_network(
+    client: &ScannerClient<Channel>,
+    mock_scan_service: &MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+) {
+    // create request, fake results and get response
+    let register_keys_response =
+        call_register_keys(client.clone(), mock_scan_service.clone(), network).await;
+
+    // test the response
+    assert_eq!(
+        register_keys_response.into_inner().keys,
+        vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()]
+    );
+
+    // create request, fake results and get response
+    let get_results_response =
+        call_get_results(client.clone(), mock_scan_service.clone(), network, false).await;
+    let transaction_heights = get_results_response
+        .into_inner()
+        .results
+        .first_key_value()
+        .unwrap()
+        .1
+        .by_height
+        .len();
+    match network {
+        Network::Mainnet => {
+            assert_eq!(transaction_heights, 3);
+        }
+        Network::Testnet => {
+            assert_eq!(transaction_heights, 1);
+        }
+    }
+
+    let delete_keys_response =
+        call_delete_keys(client.clone(), mock_scan_service.clone(), network).await;
+    // test the response
+    assert_eq!(delete_keys_response.into_inner(), Empty {});
+
+    let get_results_response =
+        call_get_results(client.clone(), mock_scan_service.clone(), network, true).await;
+    let is_results_empty = get_results_response
+        .into_inner()
+        .results
+        .first_key_value()
+        .unwrap()
+        .1
+        .by_height
+        .is_empty();
+
+    assert!(is_results_empty, "results should be empty");
+}
+
+/// Start the gRPC server, get a client and a mock service
+async fn start_server_and_get_client(
+    random_port: u16,
+) -> (
+    ScannerClient<Channel>,
+    MockService<ScanRequest, ScanResponse, PanicAssertion>,
+) {
+    // get a mocked scan service
+    let mock_scan_service = MockService::build().for_unit_tests();
+
+    // start the gRPC server
+    let listen_addr: std::net::SocketAddr = format!("127.0.0.1:{random_port}")
+        .parse()
+        .expect("hard-coded IP and u16 port should parse successfully");
+
+    {
+        let mock_scan_service = mock_scan_service.clone();
+        tokio::spawn(async move {
+            init(listen_addr, mock_scan_service)
+                .await
+                .expect("Possible port conflict");
+        });
+    }
+
+    // wait for the server to start
+    sleep(Duration::from_secs(1));
+
+    // connect to the gRPC server
+    let client = ScannerClient::connect(format!("http://127.0.0.1:{random_port}"))
+        .await
+        .expect("server should receive connection");
+
+    (client, mock_scan_service)
+}
+
+/// Add fake populated results to the mock scan service
+async fn add_fake_populated_results(
+    mock_scan_service: MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+) {
+    let mut mock_scan_service = mock_scan_service.clone();
+    tokio::spawn(async move {
+        let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+        let mut fake_results = BTreeMap::new();
+        let heights = match network {
+            Network::Mainnet => vec![Height::MIN, Height(1), Height::MAX],
+            Network::Testnet => vec![Height::MIN],
+        };
+        for fake_result_height in heights {
+            fake_results.insert(
+                fake_result_height,
+                [transaction::Hash::from([0; 32])].repeat(3),
+            );
+        }
+
+        let mut fake_results_response = BTreeMap::new();
+        fake_results_response.insert(zec_pages_sapling_efvk, fake_results);
+
+        mock_scan_service
+            .expect_request_that(|req| matches!(req, ScanRequest::Results(_)))
+            .await
+            .respond(ScanResponse::Results(fake_results_response))
+    });
+}
+
+/// Add fake empty results to the mock scan service
+async fn add_fake_empty_results(
+    mock_scan_service: MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    _network: Network,
+) {
+    let mut mock_scan_service = mock_scan_service.clone();
+    tokio::spawn(async move {
+        let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+        let fake_results = BTreeMap::new();
+        let mut fake_results_response = BTreeMap::new();
+        fake_results_response.insert(zec_pages_sapling_efvk, fake_results);
+
+        mock_scan_service
+            .expect_request_that(|req| matches!(req, ScanRequest::Results(_)))
+            .await
+            .respond(ScanResponse::Results(fake_results_response))
+    });
+}
+
+/// Call the `get_results` gRPC method, mock and return the response
+async fn call_get_results(
+    client: ScannerClient<Channel>,
+    mock_scan_service: MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+    empty_results: bool,
+) -> tonic::Response<GetResultsResponse> {
+    let get_results_response_fut = {
+        let mut client = client.clone();
+        let get_results_request = tonic::Request::new(GetResultsRequest {
+            keys: vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()],
+        });
+        tokio::spawn(async move { client.get_results(get_results_request).await })
+    };
+
+    if empty_results {
+        add_fake_empty_results(mock_scan_service, network).await;
+    } else {
+        add_fake_populated_results(mock_scan_service, network).await;
+    }
+
+    get_results_response_fut
+        .await
+        .expect("tokio task should join successfully")
+        .expect("get_results request should succeed")
+}
+
+/// Calls the `get_results` gRPC method with bad request data and asserts that it returns errors
+async fn test_get_results_errors(mut client: ScannerClient<Channel>) {
+    let request = tonic::Request::new(GetResultsRequest { keys: vec![] });
+    let response = client.get_results(request).await;
+
+    let response_error_code = response
+        .expect_err("calling get_results with no keys should return an error")
+        .code();
+
+    assert_eq!(
+        response_error_code,
+        tonic::Code::InvalidArgument,
+        "error code should be an invalid argument error"
+    );
+
+    let request = tonic::Request::new(GetResultsRequest {
+        keys: vec![
+            ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+            MAX_KEYS_PER_REQUEST
+                .checked_add(1)
+                .expect("should fit in usize")
+        ],
+    });
+    let response = client.get_results(request).await;
+    let response_error_code = response
+        .expect_err("calling get_results with too many keys should return an error")
+        .code();
+
+    assert_eq!(
+        response_error_code,
+        tonic::Code::InvalidArgument,
+        "error code should be an invalid argument error"
+    );
+}
+
+/// Call the `get_info` gRPC method, mock and return the response
+async fn call_get_info(
+    client: ScannerClient<Channel>,
+    mock_scan_service: MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    network: Network,
+) -> tonic::Response<InfoReply> {
+    let get_info_response_fut = {
+        let mut client = client.clone();
+        let get_info_request = tonic::Request::new(Empty {});
+        tokio::spawn(async move { client.get_info(get_info_request).await })
+    };
+
+    let mut mock_scan_service = mock_scan_service.clone();
+    tokio::spawn(async move {
+        mock_scan_service
+            .expect_request_that(|req| matches!(req, ScanRequest::Info))
+            .await
+            .respond(ScanResponse::Info {
+                min_sapling_birthday_height: network.sapling_activation_height(),
+            })
+    });
+
+    get_info_response_fut
+        .await
+        .expect("tokio task should join successfully")
+        .expect("get_info request should succeed")
+}
+
+/// Call the `register_keys` gRPC method, mock and return the response
+async fn call_register_keys(
+    client: ScannerClient<Channel>,
+    mock_scan_service: MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    _network: Network,
+) -> tonic::Response<RegisterKeysResponse> {
+    let key_with_height = KeyWithHeight {
+        key: ZECPAGES_SAPLING_VIEWING_KEY.to_string(),
+        height: None,
+    };
+
+    let register_keys_response_fut = {
+        let mut client = client.clone();
+        let register_keys_request = tonic::Request::new(RegisterKeysRequest {
+            keys: vec![key_with_height],
+        });
+        tokio::spawn(async move { client.register_keys(register_keys_request).await })
+    };
+
+    let mut mock_scan_service = mock_scan_service.clone();
+    tokio::spawn(async move {
+        mock_scan_service
+            .expect_request_that(|req| matches!(req, ScanRequest::RegisterKeys(_)))
+            .await
+            .respond(ScanResponse::RegisteredKeys(vec![
+                ZECPAGES_SAPLING_VIEWING_KEY.to_string(),
+            ]))
+    });
+
+    register_keys_response_fut
+        .await
+        .expect("tokio task should join successfully")
+        .expect("register_keys request should succeed")
+}
+
+/// Calls the `register_keys` gRPC method with bad request data and asserts that it returns errors
+async fn test_register_keys_errors(client: ScannerClient<Channel>) {
+    let key_with_height = KeyWithHeight {
+        key: ZECPAGES_SAPLING_VIEWING_KEY.to_string(),
+        height: None,
+    };
+
+    let fut = {
+        let mut client = client.clone();
+        let request = tonic::Request::new(RegisterKeysRequest { keys: vec![] });
+        tokio::spawn(async move { client.register_keys(request).await })
+    };
+
+    let response = fut.await.expect("tokio task should join successfully");
+    assert!(response.is_err());
+    assert_eq!(response.err().unwrap().code(), tonic::Code::InvalidArgument);
+
+    let fut = {
+        let mut client = client.clone();
+        let request = tonic::Request::new(RegisterKeysRequest {
+            keys: vec![key_with_height; 11],
+        });
+        tokio::spawn(async move { client.register_keys(request).await })
+    };
+
+    let response = fut.await.expect("tokio task should join successfully");
+    assert!(response.is_err());
+    assert_eq!(response.err().unwrap().code(), tonic::Code::InvalidArgument);
+}
+
+/// Call the `clear_results` gRPC method, mock and return the response
+async fn call_clear_results(
+    client: ScannerClient<Channel>,
+    mock_scan_service: MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    _network: Network,
+) -> tonic::Response<Empty> {
+    let clear_results_response_fut = {
+        let mut client = client.clone();
+        let clear_results_request = tonic::Request::new(ClearResultsRequest {
+            keys: vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()],
+        });
+        tokio::spawn(async move { client.clear_results(clear_results_request).await })
+    };
+
+    let mut mock_scan_service = mock_scan_service.clone();
+    tokio::spawn(async move {
+        mock_scan_service
+            .expect_request_that(|req| matches!(req, ScanRequest::ClearResults(_)))
+            .await
+            .respond(ScanResponse::ClearedResults)
+    });
+
+    clear_results_response_fut
+        .await
+        .expect("tokio task should join successfully")
+        .expect("clear_results request should succeed")
+}
+
+/// Calls the `clear_results` gRPC method with bad request data and asserts that it returns errors
+async fn test_clear_results_errors(client: ScannerClient<Channel>) {
+    let fut = {
+        let mut client = client.clone();
+        let request = tonic::Request::new(ClearResultsRequest { keys: vec![] });
+        tokio::spawn(async move { client.clear_results(request).await })
+    };
+
+    let response = fut.await.expect("tokio task should join successfully");
+    assert!(response.is_err());
+    assert_eq!(response.err().unwrap().code(), tonic::Code::InvalidArgument);
+
+    let fut = {
+        let mut client = client.clone();
+        let request = tonic::Request::new(ClearResultsRequest {
+            keys: vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string(); 11],
+        });
+        tokio::spawn(async move { client.clear_results(request).await })
+    };
+
+    let response = fut.await.expect("tokio task should join successfully");
+    assert!(response.is_err());
+    assert_eq!(response.err().unwrap().code(), tonic::Code::InvalidArgument);
+}
+
+/// Call the `delete_keys` gRPC method, mock and return the response
+async fn call_delete_keys(
+    client: ScannerClient<Channel>,
+    mock_scan_service: MockService<ScanRequest, ScanResponse, PanicAssertion>,
+    _network: Network,
+) -> tonic::Response<Empty> {
+    let delete_keys_response_fut = {
+        let mut client = client.clone();
+        let delete_keys_request = tonic::Request::new(DeleteKeysRequest {
+            keys: vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()],
+        });
+        tokio::spawn(async move { client.delete_keys(delete_keys_request).await })
+    };
+
+    let mut mock_scan_service = mock_scan_service.clone();
+    tokio::spawn(async move {
+        mock_scan_service
+            .expect_request_that(|req| matches!(req, ScanRequest::DeleteKeys(_)))
+            .await
+            .respond(ScanResponse::DeletedKeys)
+    });
+    delete_keys_response_fut
+        .await
+        .expect("tokio task should join successfully")
+        .expect("delete_keys request should succeed")
+}
+
+/// Calls the `delete_keys` gRPC method with bad request data and asserts that it returns errors
+async fn test_delete_keys_errors(client: ScannerClient<Channel>) {
+    let fut = {
+        let mut client = client.clone();
+        let request = tonic::Request::new(DeleteKeysRequest { keys: vec![] });
+        tokio::spawn(async move { client.delete_keys(request).await })
+    };
+
+    let response = fut.await.expect("tokio task should join successfully");
+    assert!(response.is_err());
+    assert_eq!(response.err().unwrap().code(), tonic::Code::InvalidArgument);
+
+    let fut = {
+        let mut client = client.clone();
+        let request = tonic::Request::new(DeleteKeysRequest {
+            keys: vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string(); 11],
+        });
+        tokio::spawn(async move { client.delete_keys(request).await })
+    };
+
+    let response = fut.await.expect("tokio task should join successfully");
+    assert!(response.is_err());
+    assert_eq!(response.err().unwrap().code(), tonic::Code::InvalidArgument);
+}
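(Aside, not part of the patch: a hypothetical client session against a running scanner server, using the same generated `ScannerClient` API the tests above exercise. The endpoint, port, and viewing key are placeholders, and the `zebra_grpc::scanner` import path assumes these generated types are public, as the test imports suggest.)

```rust
use zebra_grpc::scanner::{
    scanner_client::ScannerClient, Empty, GetResultsRequest, KeyWithHeight, RegisterKeysRequest,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder address: wherever `init` was told to listen.
    let mut client = ScannerClient::connect("http://127.0.0.1:8231").await?;

    let info = client.get_info(tonic::Request::new(Empty {})).await?;
    println!(
        "scanner min sapling birthday height: {}",
        info.into_inner().min_sapling_birthday_height
    );

    // Placeholder viewing key; `height: None` leaves the start height to the server.
    let key = KeyWithHeight {
        key: "zxviews1...".to_string(),
        height: None,
    };
    client
        .register_keys(tonic::Request::new(RegisterKeysRequest {
            keys: vec![key.clone()],
        }))
        .await?;

    // The server rejects empty key lists and more than MAX_KEYS_PER_REQUEST keys.
    let results = client
        .get_results(tonic::Request::new(GetResultsRequest {
            keys: vec![key.key],
        }))
        .await?;
    println!("{:?}", results.into_inner().results);

    Ok(())
}
```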
["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } -metrics = "0.21.1" +metrics = "0.22.1" tracing-futures = "0.2.5" tracing-error = { version = "0.2.0", features = ["traced-error"] } tracing = "0.1.39" @@ -80,18 +80,18 @@ howudoin = { version = "0.1.2", optional = true } # tor-rtcompat = { version = "0.0.2", optional = true } # proptest dependencies -proptest = { version = "1.3.1", optional = true } +proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.4.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35", features = ["async-error"] } [dev-dependencies] -proptest = "1.3.1" +proptest = "1.4.0" proptest-derive = "0.4.0" static_assertions = "1.1.0" -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } -toml = "0.8.3" +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } +toml = "0.8.10" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-network/LICENSE b/zebra-network/LICENSE index 9862976a6ce..5428318519b 100644 --- a/zebra-network/LICENSE +++ b/zebra-network/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019-2024 Zcash Foundation Copyright (c) 2019 Tower Contributors Permission is hereby granted, free of charge, to any diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index 548c96aa0a7..2f2142b6e0e 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -18,7 +18,7 @@ use tracing::Span; use zebra_chain::{parameters::Network, serialization::DateTime32}; use crate::{ - constants, + constants::{self, ADDR_RESPONSE_LIMIT_DENOMINATOR, MAX_ADDRS_IN_MESSAGE}, meta_addr::MetaAddrChange, protocol::external::{canonical_peer_addr, canonical_socket_addr}, types::MetaAddr, @@ -112,9 +112,6 @@ pub struct AddressMetrics { /// The number of addresses in the `NeverAttemptedGossiped` state. pub never_attempted_gossiped: usize, - /// The number of addresses in the `NeverAttemptedAlternate` state. - pub never_attempted_alternate: usize, - /// The number of addresses in the `Failed` state. pub failed: usize, @@ -268,7 +265,20 @@ impl AddressBook { /// Get the active addresses in `self` in random order with sanitized timestamps, /// including our local listener address. - pub fn sanitized(&self, now: chrono::DateTime) -> Vec { + /// + /// Limited to a the number of peer addresses Zebra should give out per `GetAddr` request. + pub fn fresh_get_addr_response(&self) -> Vec { + let now = Utc::now(); + let mut peers = self.sanitized(now); + let address_limit = peers.len().div_ceil(ADDR_RESPONSE_LIMIT_DENOMINATOR); + peers.truncate(MAX_ADDRS_IN_MESSAGE.min(address_limit)); + + peers + } + + /// Get the active addresses in `self` in random order with sanitized timestamps, + /// including our local listener address. + pub(crate) fn sanitized(&self, now: chrono::DateTime) -> Vec { use rand::seq::SliceRandom; let _guard = self.span.enter(); @@ -549,7 +559,7 @@ impl AddressBook { /// Return an iterator over all peers. /// /// Returns peers in reconnection attempt order, including recently connected peers. 
-    pub fn peers(&'_ self) -> impl Iterator<Item = MetaAddr> + DoubleEndedIterator + '_ {
+    pub fn peers(&'_ self) -> impl DoubleEndedIterator<Item = MetaAddr> + '_ {
         let _guard = self.span.enter();
         self.by_addr.descending_values().cloned()
     }
@@ -580,7 +590,7 @@ impl AddressBook {
         &'_ self,
         instant_now: Instant,
         chrono_now: chrono::DateTime<Utc>,
-    ) -> impl Iterator<Item = MetaAddr> + DoubleEndedIterator + '_ {
+    ) -> impl DoubleEndedIterator<Item = MetaAddr> + '_ {
         let _guard = self.span.enter();
 
         // Skip live peers, and peers pending a reconnect attempt.
@@ -599,7 +609,7 @@ impl AddressBook {
     pub fn state_peers(
         &'_ self,
         state: PeerAddrState,
-    ) -> impl Iterator<Item = MetaAddr> + DoubleEndedIterator + '_ {
+    ) -> impl DoubleEndedIterator<Item = MetaAddr> + '_ {
         let _guard = self.span.enter();
 
         self.by_addr
@@ -614,7 +624,7 @@ impl AddressBook {
         &'_ self,
         instant_now: Instant,
         chrono_now: chrono::DateTime<Utc>,
-    ) -> impl Iterator<Item = MetaAddr> + DoubleEndedIterator + '_ {
+    ) -> impl DoubleEndedIterator<Item = MetaAddr> + '_ {
         let _guard = self.span.enter();
 
         self.by_addr
@@ -654,9 +664,6 @@ impl AddressBook {
         let never_attempted_gossiped = self
             .state_peers(PeerAddrState::NeverAttemptedGossiped)
             .count();
-        let never_attempted_alternate = self
-            .state_peers(PeerAddrState::NeverAttemptedAlternate)
-            .count();
         let failed = self.state_peers(PeerAddrState::Failed).count();
         let attempt_pending = self.state_peers(PeerAddrState::AttemptPending).count();
@@ -670,7 +677,6 @@ impl AddressBook {
         AddressMetrics {
             responded,
             never_attempted_gossiped,
-            never_attempted_alternate,
             failed,
             attempt_pending,
             recently_live,
@@ -690,22 +696,15 @@ impl AddressBook {
         let _ = self.address_metrics_tx.send(m);
 
         // TODO: rename to address_book.[state_name]
-        metrics::gauge!("candidate_set.responded", m.responded as f64);
-        metrics::gauge!("candidate_set.gossiped", m.never_attempted_gossiped as f64);
-        metrics::gauge!(
-            "candidate_set.alternate",
-            m.never_attempted_alternate as f64,
-        );
-        metrics::gauge!("candidate_set.failed", m.failed as f64);
-        metrics::gauge!("candidate_set.pending", m.attempt_pending as f64);
+        metrics::gauge!("candidate_set.responded").set(m.responded as f64);
+        metrics::gauge!("candidate_set.gossiped").set(m.never_attempted_gossiped as f64);
+        metrics::gauge!("candidate_set.failed").set(m.failed as f64);
+        metrics::gauge!("candidate_set.pending").set(m.attempt_pending as f64);
 
         // TODO: rename to address_book.responded.recently_live
-        metrics::gauge!("candidate_set.recently_live", m.recently_live as f64);
+        metrics::gauge!("candidate_set.recently_live").set(m.recently_live as f64);
 
         // TODO: rename to address_book.responded.stopped_responding
-        metrics::gauge!(
-            "candidate_set.disconnected",
-            m.recently_stopped_responding as f64,
-        );
+        metrics::gauge!("candidate_set.disconnected").set(m.recently_stopped_responding as f64);
 
         std::mem::drop(_guard);
         self.log_metrics(&m, instant_now);
@@ -741,12 +740,7 @@ impl AddressBook {
         self.last_address_log = Some(now);
         // if all peers have failed
-        if m.responded
-            + m.attempt_pending
-            + m.never_attempted_gossiped
-            + m.never_attempted_alternate
-            == 0
-        {
+        if m.responded + m.attempt_pending + m.never_attempted_gossiped == 0 {
             warn!(
                 address_metrics = ?m,
                 "all peer addresses have failed. 
Hint: check your network connection" diff --git a/zebra-network/src/address_book/tests/prop.rs b/zebra-network/src/address_book/tests/prop.rs index 732d477379b..75ad6635a36 100644 --- a/zebra-network/src/address_book/tests/prop.rs +++ b/zebra-network/src/address_book/tests/prop.rs @@ -9,7 +9,10 @@ use tracing::Span; use zebra_chain::{parameters::Network::*, serialization::Duration32}; use crate::{ - constants::{DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, MAX_PEER_ACTIVE_FOR_GOSSIP}, + constants::{ + ADDR_RESPONSE_LIMIT_DENOMINATOR, DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, + MAX_ADDRS_IN_MESSAGE, MAX_PEER_ACTIVE_FOR_GOSSIP, + }, meta_addr::{arbitrary::MAX_META_ADDR, MetaAddr, MetaAddrChange}, AddressBook, }; @@ -36,8 +39,17 @@ proptest! { addresses ); - for gossiped_address in address_book.sanitized(chrono_now) { - let duration_since_last_seen = gossiped_address + // Only recently reachable are sanitized + let sanitized = address_book.sanitized(chrono_now); + let gossiped = address_book.fresh_get_addr_response(); + + let expected_num_gossiped = sanitized.len().div_ceil(ADDR_RESPONSE_LIMIT_DENOMINATOR).min(MAX_ADDRS_IN_MESSAGE); + let num_gossiped = gossiped.len(); + + prop_assert_eq!(expected_num_gossiped, num_gossiped); + + for sanitized_address in sanitized { + let duration_since_last_seen = sanitized_address .last_seen() .expect("Peer that was never seen before is being gossiped") .saturating_elapsed(chrono_now) diff --git a/zebra-network/src/address_book/tests/vectors.rs b/zebra-network/src/address_book/tests/vectors.rs index e401e6a5de3..16a9544429c 100644 --- a/zebra-network/src/address_book/tests/vectors.rs +++ b/zebra-network/src/address_book/tests/vectors.rs @@ -128,9 +128,7 @@ fn address_book_peer_order() { fn reconnection_peers_skips_recently_updated_ip() { // tests that reconnection_peers() skips addresses where there's a connection at that IP with a recent: // - `last_response` - test_reconnection_peers_skips_recently_updated_ip(true, |addr| { - MetaAddr::new_responded(addr, &PeerServices::NODE_NETWORK) - }); + test_reconnection_peers_skips_recently_updated_ip(true, MetaAddr::new_responded); // tests that reconnection_peers() *does not* skip addresses where there's a connection at that IP with a recent: // - `last_attempt` diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index ef503bc8d82..2feeb49e8e0 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -1,6 +1,6 @@ //! The timestamp collector collects liveness information from peers. -use std::{net::SocketAddr, sync::Arc}; +use std::{cmp::max, net::SocketAddr, sync::Arc}; use thiserror::Error; use tokio::{ @@ -13,6 +13,9 @@ use crate::{ address_book::AddressMetrics, meta_addr::MetaAddrChange, AddressBook, BoxError, Config, }; +/// The minimum size of the address book updater channel. +pub const MIN_CHANNEL_SIZE: usize = 10; + /// The `AddressBookUpdater` hooks into incoming message streams for each peer /// and lets the owner of the sender handle update the address book. For /// example, it can be used to record per-connection last-seen timestamps, or @@ -46,7 +49,10 @@ impl AddressBookUpdater { ) { // Create an mpsc channel for peerset address book updates, // based on the maximum number of inbound and outbound peers. 
- let (worker_tx, mut worker_rx) = mpsc::channel(config.peerset_total_connection_limit()); + let (worker_tx, mut worker_rx) = mpsc::channel(max( + config.peerset_total_connection_limit(), + MIN_CHANNEL_SIZE, + )); let address_book = AddressBook::new( local_listener, @@ -100,10 +106,9 @@ impl AddressBookUpdater { .set_pos(u64::try_from(address_info.num_addresses).expect("fits in u64")); // .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); - let never_attempted = address_info.never_attempted_alternate - + address_info.never_attempted_gossiped; - - never_bar.set_pos(u64::try_from(never_attempted).expect("fits in u64")); + never_bar.set_pos( + u64::try_from(address_info.never_attempted_gossiped).expect("fits in u64"), + ); // .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); failed_bar.set_pos(u64::try_from(address_info.failed).expect("fits in u64")); diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 402ee6fc4cc..2c7a9b416ff 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -46,7 +46,7 @@ pub use cache_dir::CacheDir; const MAX_SINGLE_SEED_PEER_DNS_RETRIES: usize = 0; /// Configuration for networking code. -#[derive(Clone, Debug, Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, Serialize)] #[serde(deny_unknown_fields, default)] pub struct Config { /// The address on which this node should listen for connections. @@ -353,10 +353,10 @@ impl Config { // (But we only make one initial connection attempt to each IP.) metrics::counter!( "zcash.net.peers.initial", - 1, "seed" => host.to_string(), "remote_ip" => ip.to_string() - ); + ) + .increment(1); } Ok(ip_addrs.into_iter().collect()) @@ -440,10 +440,10 @@ impl Config { // (But we only make one initial connection attempt to each IP.) metrics::counter!( "zcash.net.peers.initial", - 1, "cache" => peer_cache_file.display().to_string(), "remote_ip" => ip.to_string() - ); + ) + .increment(1); } Ok(peer_list) @@ -553,10 +553,10 @@ impl Config { for ip in &peer_list { metrics::counter!( "zcash.net.peers.cache", - 1, "cache" => peer_cache_file.display().to_string(), "remote_ip" => ip.to_string() - ); + ) + .increment(1); } Ok(()) diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 32c1c477599..12fabbff51a 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -84,6 +84,10 @@ pub const DEFAULT_MAX_CONNS_PER_IP: usize = 1; /// This will be used as `Config.peerset_initial_target_size` if no valid value is provided. pub const DEFAULT_PEERSET_INITIAL_TARGET_SIZE: usize = 25; +/// The maximum number of peers we will add to the address book after each `getaddr` request. +pub const PEER_ADDR_RESPONSE_LIMIT: usize = + DEFAULT_PEERSET_INITIAL_TARGET_SIZE * OUTBOUND_PEER_LIMIT_MULTIPLIER / 2; + /// The buffer size for the peer set. /// /// This should be greater than 1 to avoid sender contention, but also reasonably @@ -305,7 +309,7 @@ pub const MAX_ADDRS_IN_MESSAGE: usize = 1000; /// /// This limit makes sure that Zebra does not reveal its entire address book /// in a single `Peers` response. -pub const ADDR_RESPONSE_LIMIT_DENOMINATOR: usize = 3; +pub const ADDR_RESPONSE_LIMIT_DENOMINATOR: usize = 4; /// The maximum number of addresses Zebra will keep in its address book. /// @@ -495,8 +499,9 @@ mod tests { #[test] #[allow(clippy::assertions_on_constants)] fn ensure_address_limits_consistent() { - // Zebra 1.0.0-beta.2 address book metrics in December 2021. 
-        const TYPICAL_MAINNET_ADDRESS_BOOK_SIZE: usize = 4_500;
+        // Estimated network address book size in November 2023, after the address book limit was increased.
+        // Zebra 1.0.0-beta.2 address book metrics in December 2021 showed 4500 peers.
+        const TYPICAL_MAINNET_ADDRESS_BOOK_SIZE: usize = 5_500;
 
         let _init_guard = zebra_test::init();
 
@@ -511,7 +516,7 @@
         );
 
         assert!(
-            MAX_ADDRS_IN_ADDRESS_BOOK < TYPICAL_MAINNET_ADDRESS_BOOK_SIZE,
+            MAX_ADDRS_IN_ADDRESS_BOOK <= TYPICAL_MAINNET_ADDRESS_BOOK_SIZE,
             "the address book limit should actually be used"
        );
    }
@@ -523,11 +528,7 @@
 
         assert!(
             INVENTORY_ROTATION_INTERVAL
-                < Duration::from_secs(
-                    POST_BLOSSOM_POW_TARGET_SPACING
-                        .try_into()
-                        .expect("non-negative"),
-                ),
+                < Duration::from_secs(POST_BLOSSOM_POW_TARGET_SPACING.into()),
             "we should expire inventory every time 1-2 new blocks get generated"
         );
     }
diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs
index 0060453846f..254f85cde87 100644
--- a/zebra-network/src/lib.rs
+++ b/zebra-network/src/lib.rs
@@ -131,17 +131,7 @@
 #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")]
 #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")]
-#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_network")]
-//
-// Rust 1.72 has a false positive when nested generics are used inside Arc.
-// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code.
-//
-// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release:
-// 
-#![cfg_attr(
-    any(test, feature = "proptest-impl"),
-    allow(clippy::arc_with_non_send_sync)
-)]
+#![doc(html_root_url = "https://docs.rs/zebra_network")]
 
 #[macro_use]
 extern crate pin_project;
@@ -159,11 +149,12 @@
 /// parameterized by 'a), *not* that the object itself has 'static lifetime.
 pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
 
-mod address_book;
 pub mod address_book_peers;
-mod address_book_updater;
-mod config;
+pub mod config;
 pub mod constants;
+
+mod address_book;
+mod address_book_updater;
 mod isolated;
 mod meta_addr;
 mod peer;
diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs
index 1f8572fd53c..507620f29a0 100644
--- a/zebra-network/src/meta_addr.rs
+++ b/zebra-network/src/meta_addr.rs
@@ -54,15 +54,10 @@ pub enum PeerAddrState {
     Responded,
 
     /// The peer's address has just been fetched from a DNS seeder, or via peer
-    /// gossip, but we haven't attempted to connect to it yet.
+    /// gossip, or as part of a `Version` message, or guessed from an inbound remote IP,
+    /// but we haven't attempted to connect to it yet.
     NeverAttemptedGossiped,
 
-    /// The peer's address has just been received as part of a `Version` message,
-    /// so we might already be connected to this peer.
-    ///
-    /// Alternate addresses are attempted after gossiped addresses.
-    NeverAttemptedAlternate,
-
     /// The peer's TCP connection failed, or the peer sent us an unexpected
     /// Zcash protocol message, so we failed the connection.
     Failed,
@@ -75,7 +70,7 @@ impl PeerAddrState {
     /// Return true if this state is a "never attempted" state.
pub fn is_never_attempted(&self) -> bool { match self { - NeverAttemptedGossiped | NeverAttemptedAlternate => true, + NeverAttemptedGossiped => true, AttemptPending | Responded | Failed => false, } } @@ -88,17 +83,8 @@ impl PeerAddrState { use Ordering::*; match (self, other) { _ if self == other => Equal, - // Peers start in one of the "never attempted" states, + // Peers start in the "never attempted" state, // then typically progress towards a "responded" or "failed" state. - // - // # Security - // - // Prefer gossiped addresses to alternate addresses, - // so that peers can't replace the addresses of other peers. - // (This is currently checked explicitly by the address update code, - // but we respect the same order here as a precaution.) - (NeverAttemptedAlternate, _) => Less, - (_, NeverAttemptedAlternate) => Greater, (NeverAttemptedGossiped, _) => Less, (_, NeverAttemptedGossiped) => Greater, (AttemptPending, _) => Less, @@ -139,8 +125,6 @@ impl Ord for PeerAddrState { (_, Responded) => Greater, (NeverAttemptedGossiped, _) => Less, (_, NeverAttemptedGossiped) => Greater, - (NeverAttemptedAlternate, _) => Less, - (_, NeverAttemptedAlternate) => Greater, (Failed, _) => Less, (_, Failed) => Greater, // These patterns are redundant, but Rust doesn't assume that `==` is reflexive, @@ -249,20 +233,18 @@ pub enum MetaAddrChange { untrusted_last_seen: DateTime32, }, - /// Creates new alternate `MetaAddr`. - /// - /// Based on the canonical peer address in `Version` messages. - NewAlternate { + /// Creates new local listener `MetaAddr`. + NewLocal { #[cfg_attr( any(test, feature = "proptest-impl"), proptest(strategy = "canonical_peer_addr_strategy()") )] addr: PeerSocketAddr, - untrusted_services: PeerServices, }, - /// Creates new local listener `MetaAddr`. - NewLocal { + /// Updates an existing `MetaAddr` when an outbound connection attempt + /// starts. + UpdateAttempt { #[cfg_attr( any(test, feature = "proptest-impl"), proptest(strategy = "canonical_peer_addr_strategy()") @@ -270,14 +252,14 @@ pub enum MetaAddrChange { addr: PeerSocketAddr, }, - /// Updates an existing `MetaAddr` when an outbound connection attempt - /// starts. - UpdateAttempt { + /// Updates an existing `MetaAddr` when we've made a successful connection with a peer. + UpdateConnected { #[cfg_attr( any(test, feature = "proptest-impl"), proptest(strategy = "canonical_peer_addr_strategy()") )] addr: PeerSocketAddr, + services: PeerServices, }, /// Updates an existing `MetaAddr` when a peer responds with a message. @@ -287,7 +269,6 @@ pub enum MetaAddrChange { proptest(strategy = "canonical_peer_addr_strategy()") )] addr: PeerSocketAddr, - services: PeerServices, }, /// Updates an existing `MetaAddr` when a peer fails. @@ -345,8 +326,8 @@ impl MetaAddr { }) } - /// Returns a [`MetaAddrChange::UpdateResponded`] for a peer that has just - /// sent us a message. + /// Returns a [`MetaAddrChange::UpdateConnected`] for a peer that has just successfully + /// connected. /// /// # Security /// @@ -354,33 +335,38 @@ impl MetaAddr { /// and the services must be the services from that peer's handshake. /// /// Otherwise: - /// - malicious peers could interfere with other peers' [`AddressBook`](crate::AddressBook) state, - /// or + /// - malicious peers could interfere with other peers' [`AddressBook`](crate::AddressBook) + /// state, or /// - Zebra could advertise unreachable addresses to its own peers. 
-    pub fn new_responded(addr: PeerSocketAddr, services: &PeerServices) -> MetaAddrChange {
-        UpdateResponded {
+    pub fn new_connected(addr: PeerSocketAddr, services: &PeerServices) -> MetaAddrChange {
+        UpdateConnected {
             addr: canonical_peer_addr(*addr),
             services: *services,
         }
     }

-    /// Returns a [`MetaAddrChange::UpdateAttempt`] for a peer that we
-    /// want to make an outbound connection to.
-    pub fn new_reconnect(addr: PeerSocketAddr) -> MetaAddrChange {
-        UpdateAttempt {
+    /// Returns a [`MetaAddrChange::UpdateResponded`] for a peer that has just
+    /// sent us a message.
+    ///
+    /// # Security
+    ///
+    /// This address must be the remote address from an outbound connection.
+    ///
+    /// Otherwise:
+    /// - malicious peers could interfere with other peers' [`AddressBook`](crate::AddressBook)
+    ///   state, or
+    /// - Zebra could advertise unreachable addresses to its own peers.
+    pub fn new_responded(addr: PeerSocketAddr) -> MetaAddrChange {
+        UpdateResponded {
             addr: canonical_peer_addr(*addr),
         }
     }

-    /// Returns a [`MetaAddrChange::NewAlternate`] for a peer's alternate address,
-    /// received via a `Version` message.
-    pub fn new_alternate(
-        addr: PeerSocketAddr,
-        untrusted_services: &PeerServices,
-    ) -> MetaAddrChange {
-        NewAlternate {
+    /// Returns a [`MetaAddrChange::UpdateAttempt`] for a peer that we
+    /// want to make an outbound connection to.
+    pub fn new_reconnect(addr: PeerSocketAddr) -> MetaAddrChange {
+        UpdateAttempt {
             addr: canonical_peer_addr(*addr),
-            untrusted_services: *untrusted_services,
         }
     }
@@ -391,8 +377,7 @@ impl MetaAddr {
         }
     }

-    /// Returns a [`MetaAddrChange::UpdateFailed`] for a peer that has just had
-    /// an error.
+    /// Returns a [`MetaAddrChange::UpdateFailed`] for a peer that has just had an error.
     pub fn new_errored(
         addr: PeerSocketAddr,
         services: impl Into<Option<PeerServices>>,
     ) -> MetaAddrChange {
@@ -404,13 +389,10 @@ impl MetaAddr {
     }

     /// Create a new `MetaAddr` for a peer that has just shut down.
-    pub fn new_shutdown(
-        addr: PeerSocketAddr,
-        services: impl Into<Option<PeerServices>>,
-    ) -> MetaAddrChange {
+    pub fn new_shutdown(addr: PeerSocketAddr) -> MetaAddrChange {
         // TODO: if the peer shut down in the Responded state, preserve that
         // state. All other states should be treated as (timeout) errors.
-        MetaAddr::new_errored(addr, services.into())
+        MetaAddr::new_errored(addr, None)
     }

     /// Return the address for this `MetaAddr`.
@@ -693,9 +675,9 @@ impl MetaAddrChange {
         match self {
             NewInitial { addr }
             | NewGossiped { addr, .. }
-            | NewAlternate { addr, .. }
             | NewLocal { addr, .. }
             | UpdateAttempt { addr }
+            | UpdateConnected { addr, .. }
             | UpdateResponded { addr, .. }
             | UpdateFailed { addr, .. } => *addr,
         }
@@ -709,9 +691,9 @@ impl MetaAddrChange {
         match self {
             NewInitial { addr }
             | NewGossiped { addr, .. }
-            | NewAlternate { addr, .. }
             | NewLocal { addr, .. }
             | UpdateAttempt { addr }
+            | UpdateConnected { addr, .. }
             | UpdateResponded { addr, .. }
             | UpdateFailed { addr, .. } => *addr = new_addr,
         }
     }
@@ -721,17 +703,15 @@ impl MetaAddrChange {
     pub fn untrusted_services(&self) -> Option<PeerServices> {
         match self {
             NewInitial { .. } => None,
+            // TODO: split untrusted and direct services (#2324)
             NewGossiped {
                 untrusted_services, ..
             } => Some(*untrusted_services),
-            NewAlternate {
-                untrusted_services, ..
-            } => Some(*untrusted_services),
             // TODO: create a "services implemented by Zebra" constant (#2324)
             NewLocal { .. } => Some(PeerServices::NODE_NETWORK),
             UpdateAttempt { .. } => None,
-            // TODO: split untrusted and direct services (#2324)
-            UpdateResponded { services, .. } => Some(*services),
+            UpdateConnected { services, .. } => Some(*services),
+            UpdateResponded { .. } => None,
             UpdateFailed { services, .. } => *services,
         }
     }
@@ -744,12 +724,12 @@ impl MetaAddrChange {
                 untrusted_last_seen, ..
             } => Some(*untrusted_last_seen),
-            NewAlternate { .. } => None,
             // We know that our local listener is available
             NewLocal { .. } => Some(now),
-            UpdateAttempt { .. } => None,
-            UpdateResponded { .. } => None,
-            UpdateFailed { .. } => None,
+            UpdateAttempt { .. }
+            | UpdateConnected { .. }
+            | UpdateResponded { .. }
+            | UpdateFailed { .. } => None,
         }
     }
@@ -775,33 +755,25 @@ impl MetaAddrChange {
     /// Return the last attempt for this change, if available.
     pub fn last_attempt(&self, now: Instant) -> Option<Instant> {
         match self {
-            NewInitial { .. } => None,
-            NewGossiped { .. } => None,
-            NewAlternate { .. } => None,
-            NewLocal { .. } => None,
+            NewInitial { .. } | NewGossiped { .. } | NewLocal { .. } => None,
             // Attempt changes are applied before we start the handshake to the
             // peer address. So the attempt time is a lower bound for the actual
             // handshake time.
             UpdateAttempt { .. } => Some(now),
-            UpdateResponded { .. } => None,
-            UpdateFailed { .. } => None,
+            UpdateConnected { .. } | UpdateResponded { .. } | UpdateFailed { .. } => None,
         }
     }

     /// Return the last response for this change, if available.
     pub fn last_response(&self, now: DateTime32) -> Option<DateTime32> {
         match self {
-            NewInitial { .. } => None,
-            NewGossiped { .. } => None,
-            NewAlternate { .. } => None,
-            NewLocal { .. } => None,
-            UpdateAttempt { .. } => None,
+            NewInitial { .. } | NewGossiped { .. } | NewLocal { .. } | UpdateAttempt { .. } => None,
             // If there is a large delay applying this change, then:
             // - the peer might stay in the `AttemptPending` state for longer,
             // - we might send outdated last seen times to our peers, and
             // - the peer will appear to be live for longer, delaying future
             //   reconnection attempts.
-            UpdateResponded { .. } => Some(now),
+            UpdateConnected { .. } | UpdateResponded { .. } => Some(now),
             UpdateFailed { .. } => None,
         }
     }
@@ -809,12 +781,12 @@ impl MetaAddrChange {
     /// Return the last failure for this change, if available.
     pub fn last_failure(&self, now: Instant) -> Option<Instant> {
         match self {
-            NewInitial { .. } => None,
-            NewGossiped { .. } => None,
-            NewAlternate { .. } => None,
-            NewLocal { .. } => None,
-            UpdateAttempt { .. } => None,
-            UpdateResponded { .. } => None,
+            NewInitial { .. }
+            | NewGossiped { .. }
+            | NewLocal { .. }
+            | UpdateAttempt { .. }
+            | UpdateConnected { .. }
+            | UpdateResponded { .. } => None,
             // If there is a large delay applying this change, then:
             // - the peer might stay in the `AttemptPending` or `Responded`
             //   states for longer, and
@@ -829,11 +801,10 @@ impl MetaAddrChange {
         match self {
             NewInitial { .. } => NeverAttemptedGossiped,
             NewGossiped { .. } => NeverAttemptedGossiped,
-            NewAlternate { .. } => NeverAttemptedAlternate,
             // local listeners get sanitized, so the state doesn't matter here
             NewLocal { .. } => NeverAttemptedGossiped,
             UpdateAttempt { .. } => AttemptPending,
-            UpdateResponded { .. } => Responded,
+            UpdateConnected { .. } | UpdateResponded { .. } => Responded,
             UpdateFailed { .. } => Failed,
         }
     }
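After this change, every `MetaAddrChange` variant maps to one of only four `PeerAddrState`s. A minimal sketch of the `peer_addr_state` mapping above, using simplified stand-in types instead of the real ones:

```rust
// Simplified stand-ins for `MetaAddrChange` and `PeerAddrState`.
enum Change {
    NewInitial,
    NewGossiped,
    NewLocal,
    UpdateAttempt,
    UpdateConnected,
    UpdateResponded,
    UpdateFailed,
}

#[derive(Debug, PartialEq)]
enum State {
    NeverAttemptedGossiped,
    AttemptPending,
    Responded,
    Failed,
}

// Mirrors `peer_addr_state` above: with `NeverAttemptedAlternate` gone,
// every "new" change maps to the single never-attempted state.
fn peer_addr_state(change: &Change) -> State {
    match change {
        Change::NewInitial | Change::NewGossiped | Change::NewLocal => {
            State::NeverAttemptedGossiped
        }
        Change::UpdateAttempt => State::AttemptPending,
        Change::UpdateConnected | Change::UpdateResponded => State::Responded,
        Change::UpdateFailed => State::Failed,
    }
}

fn main() {
    assert_eq!(peer_addr_state(&Change::UpdateConnected), State::Responded);
    assert_eq!(peer_addr_state(&Change::NewLocal), State::NeverAttemptedGossiped);
}
```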
diff --git a/zebra-network/src/meta_addr/arbitrary.rs b/zebra-network/src/meta_addr/arbitrary.rs
index d7d4dd840e8..985b36693ea 100644
--- a/zebra-network/src/meta_addr/arbitrary.rs
+++ b/zebra-network/src/meta_addr/arbitrary.rs
@@ -1,6 +1,6 @@
 //! Randomised test data generation for MetaAddr.

-use std::{net::IpAddr, time::Instant};
+use std::net::IpAddr;

 use proptest::{arbitrary::any, collection::vec, prelude::*};
@@ -45,28 +45,6 @@ impl MetaAddr {
             })
             .boxed()
     }
-
-    /// Create a strategy that generates [`MetaAddr`]s in the
-    /// [`NeverAttemptedAlternate`][1] state.
-    ///
-    /// [1]: super::PeerAddrState::NeverAttemptedAlternate
-    pub fn alternate_strategy() -> BoxedStrategy<MetaAddr> {
-        (
-            canonical_peer_addr_strategy(),
-            any::<PeerServices>(),
-            any::<Instant>(),
-            any::<DateTime32>(),
-        )
-            .prop_map(
-                |(socket_addr, untrusted_services, instant_now, local_now)| {
-                    // instant_now is not actually used for this variant,
-                    // so we could just provide a default value
-                    MetaAddr::new_alternate(socket_addr, &untrusted_services)
-                        .into_new_meta_addr(instant_now, local_now)
-                },
-            )
-            .boxed()
-    }
 }

 impl MetaAddrChange {
@@ -109,33 +87,27 @@ impl MetaAddrChange {
             .boxed()
     }

-    /// Create a strategy that generates port numbers for [`MetaAddrChange`]s which are ready for
+    /// Create a strategy that generates port numbers for [`MetaAddr`]s which are ready for
     /// outbound connections.
     ///
-    /// Currently, all generated changes are the [`NewAlternate`][1] variant.
-    /// TODO: Generate all [`MetaAddrChange`] variants, and give them ready
-    /// fields. (After PR #2276 merges.)
+    /// Currently, all generated [`MetaAddr`]s are the [`NeverAttemptedGossiped`][1] variant.
+    ///
+    /// TODO: Generate all [`MetaAddr`] variants, and give them ready fields.
     ///
-    /// [1]: super::NewAlternate
+    /// [1]: super::NeverAttemptedGossiped
     pub fn ready_outbound_strategy_port() -> BoxedStrategy<u16> {
         (
             canonical_peer_addr_strategy(),
-            any::<Instant>(),
+            any::<PeerServices>(),
             any::<DateTime32>(),
         )
             .prop_filter_map(
                 "failed MetaAddr::is_valid_for_outbound",
-                |(addr, instant_now, local_now)| {
-                    // Alternate nodes use the current time, so they're always ready
-                    //
-                    // TODO: create a "Zebra supported services" constant
+                |(addr, services, local_now)| {
+                    let addr = MetaAddr::new_gossiped_meta_addr(addr, services, local_now);

-                    let change = MetaAddr::new_alternate(addr, &PeerServices::NODE_NETWORK);
-                    if change
-                        .into_new_meta_addr(instant_now, local_now)
-                        .last_known_info_is_valid_for_outbound(Mainnet)
-                    {
-                        Some(addr.port())
+                    if addr.last_known_info_is_valid_for_outbound(Mainnet) {
+                        Some(addr.addr.port())
                     } else {
                         None
                     }
diff --git a/zebra-network/src/meta_addr/tests/vectors.rs b/zebra-network/src/meta_addr/tests/vectors.rs
index 5b341901b18..d5a81bfd996 100644
--- a/zebra-network/src/meta_addr/tests/vectors.rs
+++ b/zebra-network/src/meta_addr/tests/vectors.rs
@@ -74,25 +74,6 @@ fn new_local_listener_is_gossipable() {
     assert!(peer.is_active_for_gossip(chrono_now));
 }

-/// Test if a recently received alternate peer address is not gossipable.
-///
-/// Such [`MetaAddr`] is only considered gossipable after Zebra has tried to connect to it and
-/// confirmed that the address is reachable.
-#[test]
-fn new_alternate_peer_address_is_not_gossipable() {
-    let _init_guard = zebra_test::init();
-
-    let instant_now = Instant::now();
-    let chrono_now = Utc::now();
-    let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");
-
-    let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
-
-    assert!(!peer.is_active_for_gossip(chrono_now));
-}
-
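The `prop_filter_map` pattern used by `ready_outbound_strategy_port` above generates candidate values and keeps only the ones that pass a validity check. A minimal self-contained sketch, assuming proptest 1.x; the strategy name and the evenness check are hypothetical stand-ins for the real address validity test:

```rust
use proptest::prelude::*;

// Generate candidates, then keep only the ones that pass a validity check,
// like `ready_outbound_strategy_port` does with
// `last_known_info_is_valid_for_outbound`.
fn even_port_strategy() -> BoxedStrategy<u16> {
    any::<u16>()
        .prop_filter_map("odd ports are rejected", |port| {
            (port % 2 == 0).then_some(port)
        })
        .boxed()
}

proptest! {
    #[test]
    fn generated_ports_are_even(port in even_port_strategy()) {
        prop_assert_eq!(port % 2, 0);
    }
}
```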
 /// Test if recently received gossiped peer is gossipable.
 #[test]
 fn gossiped_peer_reportedly_to_be_seen_recently_is_gossipable() {
@@ -166,11 +147,10 @@ fn recently_responded_peer_is_gossipable() {
     let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");

     let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
+    let peer_seed = MetaAddr::new_initial_peer(address).into_new_meta_addr(instant_now, local_now);

     // Create a peer that has responded
-    let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK)
+    let peer = MetaAddr::new_responded(address)
         .apply_to_meta_addr(peer_seed, instant_now, chrono_now)
         .expect("Failed to create MetaAddr for responded peer");

@@ -187,11 +167,10 @@ fn not_so_recently_responded_peer_is_still_gossipable() {
     let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");

     let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
+    let peer_seed = MetaAddr::new_initial_peer(address).into_new_meta_addr(instant_now, local_now);

     // Create a peer that has responded
-    let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK)
+    let mut peer = MetaAddr::new_responded(address)
         .apply_to_meta_addr(peer_seed, instant_now, chrono_now)
         .expect("Failed to create MetaAddr for responded peer");

@@ -218,11 +197,10 @@ fn responded_long_ago_peer_is_not_gossipable() {
     let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");

     let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
+    let peer_seed = MetaAddr::new_initial_peer(address).into_new_meta_addr(instant_now, local_now);

     // Create a peer that has responded
-    let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK)
+    let mut peer = MetaAddr::new_responded(address)
         .apply_to_meta_addr(peer_seed, instant_now, chrono_now)
         .expect("Failed to create MetaAddr for responded peer");

@@ -249,11 +227,10 @@ fn long_delayed_change_is_not_applied() {
     let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");

     let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
+    let peer_seed = MetaAddr::new_initial_peer(address).into_new_meta_addr(instant_now, local_now);

     // Create a peer that has responded
-    let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK)
+    let peer = MetaAddr::new_responded(address)
         .apply_to_meta_addr(peer_seed, instant_now, chrono_now)
         .expect("Failed to create MetaAddr for responded peer");

@@ -293,11 +270,10 @@ fn later_revert_change_is_applied() {
     let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");

     let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
+    let peer_seed = MetaAddr::new_initial_peer(address).into_new_meta_addr(instant_now, local_now);

     // Create a peer that has responded
-    let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK)
+    let peer = MetaAddr::new_responded(address)
         .apply_to_meta_addr(peer_seed, instant_now, chrono_now)
         .expect("Failed to create MetaAddr for responded peer");

@@ -336,11 +312,10 @@ fn concurrent_state_revert_change_is_not_applied() {
     let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");

     let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
+    let peer_seed = MetaAddr::new_initial_peer(address).into_new_meta_addr(instant_now, local_now);

     // Create a peer that has responded
-    let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK)
+    let peer = MetaAddr::new_responded(address)
         .apply_to_meta_addr(peer_seed, instant_now, chrono_now)
         .expect("Failed to create MetaAddr for responded peer");

@@ -396,11 +371,10 @@ fn concurrent_state_progress_change_is_applied() {
     let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038");

     let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000));
-    let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK)
-        .into_new_meta_addr(instant_now, local_now);
+    let peer_seed = MetaAddr::new_initial_peer(address).into_new_meta_addr(instant_now, local_now);

     // Create a peer that has responded
-    let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK)
+    let peer = MetaAddr::new_responded(address)
         .apply_to_meta_addr(peer_seed, instant_now, chrono_now)
         .expect("Failed to create MetaAddr for responded peer");

diff --git a/zebra-network/src/peer.rs b/zebra-network/src/peer.rs
index 13498e9ba2f..b51db0adcf9 100644
--- a/zebra-network/src/peer.rs
+++ b/zebra-network/src/peer.rs
@@ -10,6 +10,7 @@ mod minimum_peer_version;
 mod priority;

 #[cfg(any(test, feature = "proptest-impl"))]
+#[allow(unused_imports)]
 pub use client::tests::ClientTestHarness;

 #[cfg(test)]
@@ -28,6 +29,7 @@ pub use error::{ErrorSlot, HandshakeError, PeerError, SharedPeerError};
 pub use handshake::{ConnectedAddr, ConnectionInfo, Handshake, HandshakeRequest};
 pub use load_tracked_client::LoadTrackedClient;
 pub use minimum_peer_version::MinimumPeerVersion;
+#[allow(unused_imports)]
 pub use priority::{
     address_is_valid_for_inbound_listeners, address_is_valid_for_outbound_connections,
     AttributePreference, PeerPreference,
diff --git a/zebra-network/src/peer/client.rs b/zebra-network/src/peer/client.rs
index 69940275414..8dd81ed3239 100644
--- a/zebra-network/src/peer/client.rs
+++ b/zebra-network/src/peer/client.rs
@@ -18,6 +18,8 @@ use futures::{
 use tokio::{sync::broadcast, task::JoinHandle};
 use tower::Service;

+use zebra_chain::diagnostic::task::CheckForPanics;
+
 use crate::{
     peer::{
         error::{AlreadyErrored, ErrorSlot, PeerError, SharedPeerError},
@@ -421,8 +423,15 @@ impl MissingInventoryCollector {

 impl Client {
     /// Check if this connection's heartbeat task has exited.
+    ///
+    /// Returns an error if the heartbeat task exited. Otherwise, schedules the client task for
+    /// wakeup when the heartbeat task finishes, or the channel closes, and returns `Pending`.
+    ///
+    /// # Panics
+    ///
+    /// If the heartbeat task panicked.
     #[allow(clippy::unwrap_in_result)]
-    fn check_heartbeat(&mut self, cx: &mut Context<'_>) -> Result<(), SharedPeerError> {
+    fn poll_heartbeat(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), SharedPeerError>> {
         let is_canceled = self
             .shutdown_tx
             .as_mut()
             .poll_canceled(cx)
             .is_ready();

-        if is_canceled {
-            return self.set_task_exited_error(
-                "heartbeat",
-                PeerError::HeartbeatTaskExited("Task was cancelled".to_string()),
-            );
-        }
-
-        match self.heartbeat_task.poll_unpin(cx) {
+        let result = match self.heartbeat_task.poll_unpin(cx) {
             Poll::Pending => {
-                // Heartbeat task is still running.
-                Ok(())
+                // The heartbeat task returns `Pending` while it continues to run.
+                // But if it has dropped its receiver, it is shutting down, and we should also shut down.
+                if is_canceled {
+                    self.set_task_exited_error(
+                        "heartbeat",
+                        PeerError::HeartbeatTaskExited("Task was cancelled".to_string()),
+                    )
+                } else {
+                    // Heartbeat task is still running.
+                    return Poll::Pending;
+                }
             }
             Poll::Ready(Ok(Ok(_))) => {
                 // Heartbeat task stopped unexpectedly, without panic or error.
@@ -459,6 +470,9 @@ impl Client {
                 )
             }
             Poll::Ready(Err(error)) => {
+                // Heartbeat task panicked.
+                let error = error.panic_if_task_has_panicked();
+
                 // Heartbeat task was cancelled.
                 if error.is_cancelled() {
                     self.set_task_exited_error(
@@ -466,11 +480,7 @@ impl Client {
                         PeerError::HeartbeatTaskExited("Task was cancelled".to_string()),
                     )
                 }
-                // Heartbeat task stopped with panic.
-                else if error.is_panic() {
-                    panic!("heartbeat task has panicked: {error}");
-                }
-                // Heartbeat task stopped with error.
+                // Heartbeat task stopped with another kind of task error.
                 else {
                     self.set_task_exited_error(
                         "heartbeat",
@@ -478,25 +488,48 @@ impl Client {
                     )
                 }
             }
-        }
+        };
+
+        Poll::Ready(result)
     }

-    /// Check if the connection's task has exited.
-    fn check_connection(&mut self, context: &mut Context<'_>) -> Result<(), SharedPeerError> {
-        match self.connection_task.poll_unpin(context) {
-            Poll::Pending => {
-                // Connection task is still running.
-                Ok(())
-            }
-            Poll::Ready(Ok(())) => {
+    /// Check if the connection's request/response task has exited.
+    ///
+    /// Returns an error if the connection task exited. Otherwise, schedules the client task for
+    /// wakeup when the connection task finishes, and returns `Pending`.
+    ///
+    /// # Panics
+    ///
+    /// If the connection task panicked.
+    fn poll_connection(&mut self, context: &mut Context<'_>) -> Poll<Result<(), SharedPeerError>> {
+        // Return `Pending` if the connection task is still running.
+        let result = match ready!(self.connection_task.poll_unpin(context)) {
+            Ok(()) => {
                 // Connection task stopped unexpectedly, without panicking.
                 self.set_task_exited_error("connection", PeerError::ConnectionTaskExited)
             }
-            Poll::Ready(Err(error)) => {
-                // Connection task stopped unexpectedly with a panic.
-                panic!("connection task has panicked: {error}");
+            Err(error) => {
+                // Connection task panicked.
+                let error = error.panic_if_task_has_panicked();
+
+                // Connection task was cancelled.
+                if error.is_cancelled() {
+                    self.set_task_exited_error(
+                        "connection",
+                        PeerError::HeartbeatTaskExited("Task was cancelled".to_string()),
+                    )
+                }
+                // Connection task stopped with another kind of task error.
+                else {
+                    self.set_task_exited_error(
+                        "connection",
+                        PeerError::HeartbeatTaskExited(error.to_string()),
+                    )
+                }
             }
-        }
+        };
+
+        Poll::Ready(result)
     }

     /// Properly update the error slot after a background task has unexpectedly stopped.
@@ -522,6 +555,9 @@ impl Client {
     }

     /// Poll for space in the shared request sender channel.
+    ///
+    /// Returns an error if the sender channel is closed. If there is no space in the channel,
+    /// returns `Pending`, and schedules the task for wakeup when there is space available.
     fn poll_request(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), SharedPeerError>> {
         let server_result = ready!(self.server_tx.poll_ready(cx));
         if server_result.is_err() {
@@ -536,6 +572,33 @@ impl Client {
         }
     }

+    /// Poll for space in the shared request sender channel, and for errors in the connection tasks.
+    ///
+    /// Returns an error if the sender channel is closed, or the heartbeat or connection tasks have
+    /// terminated. If there is no space in the channel, returns `Pending`, and schedules the task
+    /// for wakeup when there is space available, or one of the tasks terminates.
+    fn poll_client(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), SharedPeerError>> {
+        // # Correctness
+        //
+        // The current task must be scheduled for wakeup every time we return
+        // `Poll::Pending`.
+        //
+        // `poll_heartbeat()` and `poll_connection()` schedule the client task for wakeup
+        // if either task exits, or if the heartbeat task drops the cancel handle.
+        //
+        // `ready!` returns `Poll::Pending` when `server_tx` is unready, and
+        // schedules this task for wakeup.
+        //
+        // It's ok to exit early and skip wakeups when there is an error, because the connection
+        // and its tasks are shut down immediately on error.
+
+        let _heartbeat_pending: Poll<()> = self.poll_heartbeat(cx)?;
+        let _connection_pending: Poll<()> = self.poll_connection(cx)?;
+
+        // We're only pending if the sender channel is full.
+        self.poll_request(cx)
+    }
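The `poll_client` refactor relies on the standard `Try` impl for `Poll<Result<_, _>>`: `?` returns early on `Ready(Err(_))`, and passes both `Ready(Ok(_))` and `Pending` through as a plain `Poll` value. A minimal sketch of that control flow, with a simplified error type and hypothetical function names:

```rust
use std::task::Poll;

// Simplified stand-in for `SharedPeerError`.
type PeerError = String;

fn poll_heartbeat() -> Poll<Result<(), PeerError>> {
    // Pretend the heartbeat task is still running.
    Poll::Pending
}

fn poll_request() -> Poll<Result<(), PeerError>> {
    Poll::Ready(Ok(()))
}

fn poll_client() -> Poll<Result<(), PeerError>> {
    // `?` unwraps `Ready(Ok(_))` and forwards `Pending` as a value;
    // `Ready(Err(_))` returns early from `poll_client` with the error.
    let _heartbeat_pending: Poll<()> = poll_heartbeat()?;

    // Only the request channel decides whether we are `Pending` overall.
    poll_request()
}

fn main() {
    assert!(matches!(poll_client(), Poll::Ready(Ok(()))));
}
```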
     /// Shut down the resources held by the client half of this peer connection.
     ///
     /// Stops further requests to the remote peer, and stops the heartbeat task.
@@ -566,20 +629,16 @@ impl Service<Request> for Client {
         // The current task must be scheduled for wakeup every time we return
         // `Poll::Pending`.
         //
-        // `check_heartbeat` and `check_connection` schedule the client task for wakeup
-        // if either task exits, or if the heartbeat task drops the cancel handle.
+        // `poll_client()` schedules the client task for wakeup if the sender channel has space,
+        // either connection task exits, or if the heartbeat task drops the cancel handle.
+
+        // Check all the tasks and channels.
         //
         // `ready!` returns `Poll::Pending` when `server_tx` is unready, and
         // schedules this task for wakeup.
+        let result = ready!(self.poll_client(cx));

-        let mut result = self
-            .check_heartbeat(cx)
-            .and_then(|()| self.check_connection(cx));
-
-        if result.is_ok() {
-            result = ready!(self.poll_request(cx));
-        }
-
+        // Shut down the client and connection if there is an error.
         if let Err(error) = result {
             self.shutdown();
diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs
index 2266085812e..f5ea2d0a4df 100644
--- a/zebra-network/src/peer/connection.rs
+++ b/zebra-network/src/peer/connection.rs
@@ -14,7 +14,7 @@ use futures::{
     prelude::*,
     stream::Stream,
 };
-use rand::{thread_rng, Rng};
+use rand::{seq::SliceRandom, thread_rng, Rng};
 use tokio::time::{sleep, Sleep};
 use tower::{Service, ServiceExt};
 use tracing_futures::Instrument;
@@ -27,8 +27,8 @@ use zebra_chain::{

 use crate::{
     constants::{
-        self, MAX_OVERLOAD_DROP_PROBABILITY, MIN_OVERLOAD_DROP_PROBABILITY,
-        OVERLOAD_PROTECTION_INTERVAL,
+        self, MAX_ADDRS_IN_MESSAGE, MAX_OVERLOAD_DROP_PROBABILITY, MIN_OVERLOAD_DROP_PROBABILITY,
+        OVERLOAD_PROTECTION_INTERVAL, PEER_ADDR_RESPONSE_LIMIT,
     },
     meta_addr::MetaAddr,
     peer::{
@@ -137,7 +137,14 @@ impl Handler {
     /// interpretable as a response, we return ownership to the caller.
     ///
     /// Unexpected messages are left unprocessed, and may be rejected later.
-    fn process_message(&mut self, msg: Message) -> Option<Message> {
+    ///
+    /// `addr` responses are limited to avoid peer set takeover. Any excess
+    /// addresses are stored in `cached_addrs`.
+    fn process_message(
+        &mut self,
+        msg: Message,
+        cached_addrs: &mut Vec<MetaAddr>,
+    ) -> Option<Message> {
         let mut ignored_msg = None;
         // TODO: can this be avoided?
         let tmp_state = std::mem::replace(self, Handler::Finished(Ok(Response::Nil)));
@@ -152,7 +159,24 @@ impl Handler {
                     Handler::Ping(req_nonce)
                 }
             }
-            (Handler::Peers, Message::Addr(addrs)) => Handler::Finished(Ok(Response::Peers(addrs))),
+
+            (Handler::Peers, Message::Addr(new_addrs)) => {
+                // Security: This method performs security-sensitive operations, see its comments
+                // for details.
+                let response_addrs =
+                    Handler::update_addr_cache(cached_addrs, &new_addrs, PEER_ADDR_RESPONSE_LIMIT);
+
+                debug!(
+                    new_addrs = new_addrs.len(),
+                    response_addrs = response_addrs.len(),
+                    remaining_addrs = cached_addrs.len(),
+                    PEER_ADDR_RESPONSE_LIMIT,
+                    "responding to Peers request using new and cached addresses",
+                );
+
+                Handler::Finished(Ok(Response::Peers(response_addrs)))
+            }
+
             // `zcashd` returns requested transactions in a single batch of messages.
             // Other transaction or non-transaction messages can come before or after the batch.
             // After the transaction batch, `zcashd` sends `notfound` if any transactions are missing:
@@ -251,6 +275,7 @@ impl Handler {
                     )))
                 }
             }
+
             // `zcashd` returns requested blocks in a single batch of messages.
             // Other blocks or non-blocks messages can come before or after the batch.
             // `zcashd` silently skips missing blocks, rather than sending a final `notfound` message.
@@ -365,6 +390,10 @@ impl Handler {
                     block_hashes(&items[..]).collect(),
                 )))
             }
+            (Handler::FindHeaders, Message::Headers(headers)) => {
+                Handler::Finished(Ok(Response::BlockHeaders(headers)))
+            }
+
             (Handler::MempoolTransactionIds, Message::Inv(items))
                 if items.iter().all(|item| item.unmined_tx_id().is_some()) =>
             {
@@ -372,9 +401,7 @@ impl Handler {
                     transaction_ids(&items).collect(),
                 )))
             }
-            (Handler::FindHeaders, Message::Headers(headers)) => {
-                Handler::Finished(Ok(Response::BlockHeaders(headers)))
-            }
+
             // By default, messages are not responses.
             (state, msg) => {
                 trace!(?msg, "did not interpret message as response");
@@ -385,6 +412,52 @@ impl Handler {

         ignored_msg
     }
+
+    /// Adds `new_addrs` to the `cached_addrs` cache, then takes and returns `response_size`
+    /// addresses from that cache.
+    ///
`new_addrs` can be empty or `None` if + /// there are no new addresses. `response_size` can be zero or `None` if there is no response + /// needed. + fn update_addr_cache<'new>( + cached_addrs: &mut Vec, + new_addrs: impl IntoIterator, + response_size: impl Into>, + ) -> Vec { + // # Peer Set Reliability + // + // Newly received peers are added to the cache, so that we can use them if the connection + // doesn't respond to our getaddr requests. + // + // Add the new addresses to the end of the cache. + cached_addrs.extend(new_addrs); + + // # Security + // + // We limit how many peer addresses we take from each peer, so that our address book + // and outbound connections aren't controlled by a single peer (#1869). We randomly select + // peers, so the remote peer can't control which addresses we choose by changing the order + // in the messages they send. + let response_size = response_size.into().unwrap_or_default(); + + let mut temp_cache = Vec::new(); + std::mem::swap(cached_addrs, &mut temp_cache); + + // The response is fully shuffled, remaining is partially shuffled. + let (response, remaining) = temp_cache.partial_shuffle(&mut thread_rng(), response_size); + + // # Security + // + // The cache size is limited to avoid memory denial of service. + // + // It's ok to just partially shuffle the cache, because it doesn't actually matter which + // peers we drop. Having excess peers is rare, because most peers only send one large + // unsolicited peer message when they first connect. + *cached_addrs = remaining.to_vec(); + cached_addrs.truncate(MAX_ADDRS_IN_MESSAGE); + + response.to_vec() + } } #[derive(Debug)] @@ -468,13 +541,16 @@ where /// other state handling. pub(super) request_timer: Option>>, - /// A cached copy of the last unsolicited `addr` or `addrv2` message from this peer. + /// Unused peers from recent `addr` or `addrv2` messages from this peer. + /// Also holds the initial addresses sent in `version` messages, or guessed from the remote IP. + /// + /// When peers send solicited or unsolicited peer advertisements, Zebra puts them in this cache. /// - /// When Zebra requests peers, the cache is consumed and returned as a synthetic response. - /// This works around `zcashd`'s address response rate-limit. + /// When Zebra's components request peers, some cached peers are randomly selected, + /// consumed, and returned as a modified response. This works around `zcashd`'s address + /// response rate-limit. /// - /// Multi-peer `addr` or `addrv2` messages replace single-peer messages in the cache. - /// (`zcashd` also gossips its own address at regular intervals.) + /// The cache size is limited to avoid denial of service attacks. pub(super) cached_addrs: Vec, /// The `inbound` service, used to answer requests from this connection's peer. @@ -536,6 +612,7 @@ where .field("error_slot", &self.error_slot) .field("metrics_label", &self.metrics_label) .field("last_metrics_state", &self.last_metrics_state) + .field("last_overload_time", &self.last_overload_time) .finish() } } @@ -544,7 +621,7 @@ impl Connection where Tx: Sink + Unpin, { - /// Return a new connection from its channels, services, and shared state. + /// Return a new connection from its channels, services, shared state, and metadata. 
 #[derive(Debug)]
@@ -468,13 +541,16 @@ where
     /// other state handling.
     pub(super) request_timer: Option<Pin<Box<Sleep>>>,

-    /// A cached copy of the last unsolicited `addr` or `addrv2` message from this peer.
+    /// Unused peers from recent `addr` or `addrv2` messages from this peer.
+    /// Also holds the initial addresses sent in `version` messages, or guessed from the remote IP.
+    ///
+    /// When peers send solicited or unsolicited peer advertisements, Zebra puts them in this cache.
     ///
-    /// When Zebra requests peers, the cache is consumed and returned as a synthetic response.
-    /// This works around `zcashd`'s address response rate-limit.
+    /// When Zebra's components request peers, some cached peers are randomly selected,
+    /// consumed, and returned as a modified response. This works around `zcashd`'s address
+    /// response rate-limit.
     ///
-    /// Multi-peer `addr` or `addrv2` messages replace single-peer messages in the cache.
-    /// (`zcashd` also gossips its own address at regular intervals.)
+    /// The cache size is limited to avoid denial of service attacks.
     pub(super) cached_addrs: Vec<MetaAddr>,

     /// The `inbound` service, used to answer requests from this connection's peer.
@@ -536,6 +612,7 @@ where
             .field("error_slot", &self.error_slot)
             .field("metrics_label", &self.metrics_label)
             .field("last_metrics_state", &self.last_metrics_state)
+            .field("last_overload_time", &self.last_overload_time)
             .finish()
     }
 }
@@ -544,7 +621,7 @@ impl<S, Tx> Connection<S, Tx>
 where
     Tx: Sink<Message, Error = SerializationError> + Unpin,
 {
-    /// Return a new connection from its channels, services, and shared state.
+    /// Return a new connection from its channels, services, shared state, and metadata.
     pub(crate) fn new(
         inbound_service: S,
         client_rx: futures::channel::mpsc::Receiver<ClientRequest>,
@@ -552,6 +629,7 @@ where
         peer_tx: Tx,
         connection_tracker: ConnectionTracker,
         connection_info: Arc<ConnectionInfo>,
+        initial_cached_addrs: Vec<MetaAddr>,
     ) -> Self {
         let metrics_label = connection_info.connected_addr.get_transient_addr_label();
@@ -559,7 +637,7 @@ where
             connection_info,
             state: State::AwaitingRequest,
             request_timer: None,
-            cached_addrs: Vec::new(),
+            cached_addrs: initial_cached_addrs,
             svc: inbound_service,
             client_rx: client_rx.into(),
             error_slot,
@@ -712,10 +790,10 @@ where
                 // Add a metric for inbound responses to outbound requests.
                 metrics::counter!(
                     "zebra.net.in.responses",
-                    1,
                     "command" => response.command(),
                     "addr" => self.metrics_label.clone(),
-                );
+                )
+                .increment(1);
             } else {
                 debug!(error = ?response, "error in peer response to Zebra request");
             }
@@ -780,7 +858,7 @@ where
         let request_msg = match self.state {
             State::AwaitingResponse {
                 ref mut handler, ..
-            } => span.in_scope(|| handler.process_message(peer_msg)),
+            } => span.in_scope(|| handler.process_message(peer_msg, &mut self.cached_addrs)),
             _ => unreachable!("unexpected state after AwaitingResponse: {:?}, peer_msg: {:?}, client_receiver: {:?}",
                 self.state,
                 peer_msg,
@@ -891,15 +969,15 @@ where
         let InProgressClientRequest { request, tx, span } = req;

         if tx.is_canceled() {
-            metrics::counter!("peer.canceled", 1);
+            metrics::counter!("peer.canceled").increment(1);
             debug!(state = %self.state, %request, "ignoring canceled request");

             metrics::counter!(
                 "zebra.net.out.requests.canceled",
-                1,
                 "command" => request.command(),
                 "addr" => self.metrics_label.clone(),
-            );
+            )
+            .increment(1);
             self.update_state_metrics(format!("Out::Req::Canceled::{}", request.command()));

             return;
@@ -910,10 +988,10 @@ where
         // Add a metric for outbound requests.
         metrics::counter!(
             "zebra.net.out.requests",
-            1,
             "command" => request.command(),
             "addr" => self.metrics_label.clone(),
-        );
+        )
+        .increment(1);
         self.update_state_metrics(format!("Out::Req::{}", request.command()));

         let new_handler = match (&self.state, request) {
@@ -929,16 +1007,21 @@ where
                 self.client_rx
             ),

-            // Consume the cached addresses from the peer,
-            // to work-around a `zcashd` response rate-limit.
+            // Take some cached addresses from the peer connection. This address cache helps
+            // work around a `zcashd` addr response rate-limit.
             (AwaitingRequest, Peers) if !self.cached_addrs.is_empty() => {
-                let cached_addrs = std::mem::take(&mut self.cached_addrs);
+                // Security: This method performs security-sensitive operations, see its comments
+                // for details.
+                let response_addrs =
+                    Handler::update_addr_cache(&mut self.cached_addrs, None, PEER_ADDR_RESPONSE_LIMIT);
+
                 debug!(
-                    addrs = cached_addrs.len(),
-                    "responding to Peers request using cached addresses",
+                    response_addrs = response_addrs.len(),
+                    remaining_addrs = self.cached_addrs.len(),
+                    PEER_ADDR_RESPONSE_LIMIT,
+                    "responding to Peers request using some cached addresses",
                 );
-                Ok(Handler::Finished(Ok(Response::Peers(cached_addrs))))
+
+                Ok(Handler::Finished(Ok(Response::Peers(response_addrs))))
             }
             (AwaitingRequest, Peers) => self
                 .peer_tx
@@ -1145,28 +1228,32 @@ where
                 // Ignored, but consumed because it is technically a protocol error.
                 Consumed
             }
-            // Zebra crawls the network proactively, to prevent
-            // peers from inserting data into our address book.
-            Message::Addr(ref addrs) => {
-                // Workaround `zcashd`'s `getaddr` response rate-limit
-                if addrs.len() > 1 {
-                    // Always refresh the cache with multi-addr messages.
-                    debug!(%msg, "caching unsolicited multi-addr message");
-                    self.cached_addrs = addrs.clone();
-                    Consumed
-                } else if addrs.len() == 1 && self.cached_addrs.len() <= 1 {
-                    // Only refresh a cached single addr message with another single addr.
-                    // (`zcashd` regularly advertises its own address.)
-                    debug!(%msg, "caching unsolicited single addr message");
-                    self.cached_addrs = addrs.clone();
-                    Consumed
-                } else {
-                    debug!(
-                        %msg,
-                        "ignoring unsolicited single addr message: already cached a multi-addr message"
-                    );
-                    Consumed
-                }
+
+            // # Security
+            //
+            // Zebra crawls the network proactively, and that's the only way peers get into our
+            // address book. This prevents peers from filling our address book with malicious peer
+            // addresses.
+            Message::Addr(ref new_addrs) => {
+                // # Peer Set Reliability
+                //
+                // We keep a list of the unused peer addresses sent by each connection, to work
+                // around `zcashd`'s `getaddr` response rate-limit.
+                let no_response =
+                    Handler::update_addr_cache(&mut self.cached_addrs, new_addrs, None);
+                assert_eq!(
+                    no_response,
+                    Vec::new(),
+                    "peers unexpectedly taken from cache"
+                );
+
+                debug!(
+                    new_addrs = new_addrs.len(),
+                    cached_addrs = self.cached_addrs.len(),
+                    "adding unsolicited addresses to cached addresses",
+                );
+
+                Consumed
             }
             Message::Tx(ref transaction) => Request::PushTransaction(transaction.clone()).into(),
             Message::Inv(ref items) => match &items[..] {
@@ -1273,10 +1360,10 @@ where
         // Add a metric for inbound requests
         metrics::counter!(
             "zebra.net.in.requests",
-            1,
             "command" => req.command(),
             "addr" => self.metrics_label.clone(),
-        );
+        )
+        .increment(1);
         self.update_state_metrics(format!("In::Req::{}", req.command()));

         // Give the inbound service time to clear its queue,
@@ -1289,11 +1376,15 @@ where
         //
        //
         // The inbound service must be called immediately after a buffer slot is reserved.
+        //
+        // The inbound service never times out in readiness, because the load shed layer is always
+        // ready, and returns an error in response to the request instead.
         if self.svc.ready().await.is_err() {
            self.fail_with(PeerError::ServiceShutdown).await;
            return;
        }
+
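The metrics hunks in this file all follow the same migration: the count or value argument moves out of the macro call, and the macro returns a handle that is incremented or set explicitly. A minimal sketch of the new calling convention, assuming a handle-returning metrics crate version (0.22-style macros) and placeholder metric names:

```rust
fn record_example_metrics(command: &str, addr_label: String) {
    // Counters: the macro creates or looks up the counter handle,
    // and the count is applied through the handle.
    metrics::counter!(
        "example.requests",
        "command" => command.to_string(),
        "addr" => addr_label.clone(),
    )
    .increment(1);

    // Gauges: the handle supports `increment`, `decrement`, and `set`.
    metrics::gauge!("example.connection.state", "addr" => addr_label).set(0.0);
}
```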
+        // Inbound service request timeouts are handled by the timeout layer in `start::start()`.
         let rsp = match self.svc.call(req.clone()).await {
             Err(e) => {
                 if e.is::<Overloaded>() {
@@ -1340,10 +1431,10 @@ where
         // Add a metric for outbound responses to inbound requests
         metrics::counter!(
             "zebra.net.out.responses",
-            1,
             "command" => rsp.command(),
             "addr" => self.metrics_label.clone(),
-        );
+        )
+        .increment(1);
         self.update_state_metrics(format!("In::Rsp::{}", rsp.command()));

         // TODO: split response handler into its own method
@@ -1479,9 +1570,9 @@ where
         if thread_rng().gen::<f32>() < drop_connection_probability {
             if matches!(error, PeerError::Overloaded) {
-                metrics::counter!("pool.closed.loadshed", 1);
+                metrics::counter!("pool.closed.loadshed").increment(1);
             } else {
-                metrics::counter!("pool.closed.inbound.timeout", 1);
+                metrics::counter!("pool.closed.inbound.timeout").increment(1);
             }

             tracing::info!(
@@ -1503,9 +1594,9 @@ where
             self.update_state_metrics(format!("In::Req::{}/Rsp::{error}::Ignored", req.command()));

             if matches!(error, PeerError::Overloaded) {
-                metrics::counter!("pool.ignored.loadshed", 1);
+                metrics::counter!("pool.ignored.loadshed").increment(1);
             } else {
-                metrics::counter!("pool.ignored.inbound.timeout", 1);
+                metrics::counter!("pool.ignored.inbound.timeout").increment(1);
             }
         }
     }
@@ -1568,12 +1659,12 @@ where
         self.erase_state_metrics();

         // Set the new state
-        metrics::increment_gauge!(
+        metrics::gauge!(
             "zebra.net.connection.state",
-            1.0,
             "command" => current_metrics_state.clone(),
             "addr" => self.metrics_label.clone(),
-        );
+        )
+        .increment(1.0);

         self.last_metrics_state = Some(current_metrics_state);
     }
@@ -1583,10 +1674,10 @@ where
         if let Some(last_metrics_state) = self.last_metrics_state.take() {
             metrics::gauge!(
                 "zebra.net.connection.state",
-                0.0,
                 "command" => last_metrics_state,
                 "addr" => self.metrics_label.clone(),
-            );
+            )
+            .set(0.0);
         }
     }
diff --git a/zebra-network/src/peer/connection/tests.rs b/zebra-network/src/peer/connection/tests.rs
index b22391502e4..bc21858ced8 100644
--- a/zebra-network/src/peer/connection/tests.rs
+++ b/zebra-network/src/peer/connection/tests.rs
@@ -83,6 +83,7 @@ fn new_test_connection() -> (
         peer_tx,
         ActiveConnectionCounter::new_counter().track_connection(),
         Arc::new(connection_info),
+        Vec::new(),
     );

     (
diff --git a/zebra-network/src/peer/connection/tests/prop.rs b/zebra-network/src/peer/connection/tests/prop.rs
index 3c4b2d51ca2..f633aea585a 100644
--- a/zebra-network/src/peer/connection/tests/prop.rs
+++ b/zebra-network/src/peer/connection/tests/prop.rs
@@ -7,7 +7,7 @@ use futures::{
     sink::SinkMapErr,
     SinkExt, StreamExt,
 };
-use proptest::prelude::*;
+use proptest::{collection, prelude::*};
 use tracing::Span;

 use zebra_chain::{
@@ -18,7 +18,12 @@ use zebra_chain::{
 use zebra_test::mock_service::{MockService, PropTestAssertion};

 use crate::{
-    peer::{connection::Connection, ClientRequest, ErrorSlot},
+    constants::{MAX_ADDRS_IN_MESSAGE, PEER_ADDR_RESPONSE_LIMIT},
+    meta_addr::MetaAddr,
+    peer::{
+        connection::{Connection, Handler},
+        ClientRequest, ErrorSlot,
+    },
     protocol::external::Message,
     protocol::internal::InventoryResponse,
     Request, Response, SharedPeerError,
@@ -129,6 +134,41 @@ proptest! {
             Ok(())
         })?;
     }
+
+    /// This test makes sure that Zebra's per-connection peer cache is updated correctly.
+    #[test]
+    fn cache_is_updated_correctly(
+        mut cached_addrs in collection::vec(MetaAddr::gossiped_strategy(), 0..=MAX_ADDRS_IN_MESSAGE),
+        new_addrs in collection::vec(MetaAddr::gossiped_strategy(), 0..=MAX_ADDRS_IN_MESSAGE),
+        response_size in 0..=PEER_ADDR_RESPONSE_LIMIT,
+    ) {
+        let _init_guard = zebra_test::init();
+
+        let old_cached_addrs = cached_addrs.clone();
+
+        let response = Handler::update_addr_cache(&mut cached_addrs, &new_addrs, response_size);
+
+        prop_assert!(cached_addrs.len() <= MAX_ADDRS_IN_MESSAGE, "cache has a limited size");
+        prop_assert!(response.len() <= response_size, "response has a limited size");
+
+        prop_assert!(response.len() <= old_cached_addrs.len() + new_addrs.len(), "no duplicate or made up addresses in response");
+        prop_assert!(cached_addrs.len() <= old_cached_addrs.len() + new_addrs.len(), "no duplicate or made up addresses in cache");
+
+        if old_cached_addrs.len() + new_addrs.len() >= response_size {
+            // If we deduplicate addresses, this check should fail and must be changed
+            prop_assert_eq!(response.len(), response_size, "response gets addresses before cache does");
+        } else {
+            prop_assert!(response.len() < response_size, "response gets all addresses if there is no excess");
+        }
+
+        if old_cached_addrs.len() + new_addrs.len() <= response_size {
+            prop_assert_eq!(cached_addrs.len(), 0, "cache is empty if there are no excess addresses");
+        } else {
+            // If we deduplicate addresses, this check should fail and must be changed
+            prop_assert_ne!(cached_addrs.len(), 0, "cache gets excess addresses");
+        }
+
+    }
 }

 /// Creates a new [`Connection`] instance for property tests.
diff --git a/zebra-network/src/peer/error.rs b/zebra-network/src/peer/error.rs
index 6263fb56119..c40c34b1d1d 100644
--- a/zebra-network/src/peer/error.rs
+++ b/zebra-network/src/peer/error.rs
@@ -82,6 +82,10 @@ pub enum PeerError {
     #[error("Internal services over capacity")]
     Overloaded,

+    /// There are no ready remote peers.
+    #[error("No ready peers available")]
+    NoReadyPeers,
+
     /// This peer request caused an internal service timeout, so the connection was dropped
     /// to shed load or prevent attacks.
     #[error("Internal services timed out")]
@@ -147,6 +151,7 @@ impl PeerError {
             PeerError::Serialization(inner) => format!("Serialization({inner})").into(),
             PeerError::DuplicateHandshake => "DuplicateHandshake".into(),
             PeerError::Overloaded => "Overloaded".into(),
+            PeerError::NoReadyPeers => "NoReadyPeers".into(),
             PeerError::InboundTimeout => "InboundTimeout".into(),
             PeerError::ServiceShutdown => "ServiceShutdown".into(),
             PeerError::NotFoundResponse(_) => "NotFoundResponse".into(),
diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs
index e0d981951ad..161c25f0ac5 100644
--- a/zebra-network/src/peer/handshake.rs
+++ b/zebra-network/src/peer/handshake.rs
@@ -29,7 +29,7 @@ use tracing_futures::Instrument;
 use zebra_chain::{
     chain_tip::{ChainTip, NoChainTip},
     parameters::Network,
-    serialization::SerializationError,
+    serialization::{DateTime32, SerializationError},
 };

 use crate::{
@@ -556,13 +556,14 @@ where
     }
 }

-/// Negotiate the Zcash network protocol version with the remote peer
-/// at `connected_addr`, using the connection `peer_conn`.
+/// Negotiate the Zcash network protocol version with the remote peer at `connected_addr`, using
+/// the connection `peer_conn`.
 ///
-/// We split `Handshake` into its components before calling this function,
-/// to avoid infectious `Sync` bounds on the returned future.
+/// We split `Handshake` into its components before calling this function, to avoid infectious
+/// We split `Handshake` into its components before calling this function, to avoid infectious +/// `Sync` bounds on the returned future. /// -/// Returns the [`VersionMessage`] sent by the remote peer. +/// Returns the [`VersionMessage`] sent by the remote peer, and the [`Version`] negotiated with the +/// remote peer, inside a [`ConnectionInfo`] struct. #[allow(clippy::too_many_arguments)] pub async fn negotiate_version( peer_conn: &mut Framed, @@ -573,7 +574,7 @@ pub async fn negotiate_version( our_services: PeerServices, relay: bool, mut minimum_peer_version: MinimumPeerVersion, -) -> Result +) -> Result, HandshakeError> where PeerTransport: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -729,6 +730,7 @@ where // Reject connections to peers on old versions, because they might not know about all // network upgrades and could lead to chain forks or slower block propagation. let min_version = minimum_peer_version.current(); + if remote.version < min_version { debug!( remote_ip = ?their_addr, @@ -741,52 +743,59 @@ where // the value is the number of rejected handshakes, by peer IP and protocol version metrics::counter!( "zcash.net.peers.obsolete", - 1, "remote_ip" => their_addr.to_string(), "remote_version" => remote.version.to_string(), "min_version" => min_version.to_string(), "user_agent" => remote.user_agent.clone(), - ); + ) + .increment(1); // the value is the remote version of the most recent rejected handshake from each peer metrics::gauge!( "zcash.net.peers.version.obsolete", - remote.version.0 as f64, "remote_ip" => their_addr.to_string(), - ); + ) + .set(remote.version.0 as f64); // Disconnect if peer is using an obsolete version. - Err(HandshakeError::ObsoleteVersion(remote.version))?; - } else { - let negotiated_version = min(constants::CURRENT_NETWORK_PROTOCOL_VERSION, remote.version); - - debug!( - remote_ip = ?their_addr, - ?remote.version, - ?negotiated_version, - ?min_version, - ?remote.user_agent, - "negotiated network protocol version with peer", - ); + return Err(HandshakeError::ObsoleteVersion(remote.version)); + } - // the value is the number of connected handshakes, by peer IP and protocol version - metrics::counter!( - "zcash.net.peers.connected", - 1, - "remote_ip" => their_addr.to_string(), - "remote_version" => remote.version.to_string(), - "negotiated_version" => negotiated_version.to_string(), - "min_version" => min_version.to_string(), - "user_agent" => remote.user_agent.clone(), - ); + let negotiated_version = min(constants::CURRENT_NETWORK_PROTOCOL_VERSION, remote.version); + + // Limit containing struct size, and avoid multiple duplicates of 300+ bytes of data. 
+    let connection_info = Arc::new(ConnectionInfo {
+        connected_addr: *connected_addr,
+        remote,
+        negotiated_version,
+    });
+
+    debug!(
+        remote_ip = ?their_addr,
+        ?connection_info.remote.version,
+        ?negotiated_version,
+        ?min_version,
+        ?connection_info.remote.user_agent,
+        "negotiated network protocol version with peer",
+    );

-        // the value is the remote version of the most recent connected handshake from each peer
-        metrics::gauge!(
-            "zcash.net.peers.version.connected",
-            remote.version.0 as f64,
-            "remote_ip" => their_addr.to_string(),
-        );
-    }
+    // the value is the number of connected handshakes, by peer IP and protocol version
+    metrics::counter!(
+        "zcash.net.peers.connected",
+        "remote_ip" => their_addr.to_string(),
+        "remote_version" => connection_info.remote.version.to_string(),
+        "negotiated_version" => negotiated_version.to_string(),
+        "min_version" => min_version.to_string(),
+        "user_agent" => connection_info.remote.user_agent.clone(),
+    )
+    .increment(1);
+
+    // the value is the remote version of the most recent connected handshake from each peer
+    metrics::gauge!(
+        "zcash.net.peers.version.connected",
+        "remote_ip" => their_addr.to_string(),
+    )
+    .set(connection_info.remote.version.0 as f64);

     peer_conn.send(Message::Verack).await?;
@@ -812,7 +821,7 @@ where
         }
     }

-    Ok(remote)
+    Ok(connection_info)
 }

 /// A handshake request.
@@ -894,7 +903,7 @@ where
             .finish(),
     );

-    let remote = negotiate_version(
+    let connection_info = negotiate_version(
         &mut peer_conn,
         &connected_addr,
         config,
@@ -906,56 +915,25 @@ where
     )
     .await?;

-    let remote_canonical_addr = remote.address_from.addr();
-    let remote_services = remote.services;
+    let remote_services = connection_info.remote.services;

-    // If we've learned potential peer addresses from an inbound
-    // connection or handshake, add those addresses to our address book.
-    //
-    // # Security
-    //
-    // We must handle alternate addresses separately from connected
-    // addresses. Otherwise, malicious peers could interfere with the
-    // address book state of other peers by providing their addresses in
-    // `Version` messages.
-    //
-    // New alternate peer address and peer responded updates are rate-limited because:
-    // - opening connections is rate-limited
-    // - we only send these messages once per handshake
-    let alternate_addrs = connected_addr.get_alternate_addrs(remote_canonical_addr);
-    for alt_addr in alternate_addrs {
-        let alt_addr = MetaAddr::new_alternate(alt_addr, &remote_services);
-        // awaiting a local task won't hang
-        let _ = address_book_updater.send(alt_addr).await;
-    }
-
-    // The handshake succeeded: update the peer status from AttemptPending to Responded
+    // The handshake succeeded: update the peer status from AttemptPending to Responded,
+    // and send initial connection info.
     if let Some(book_addr) = connected_addr.get_address_book_addr() {
         // the collector doesn't depend on network activity,
         // so this await should not hang
         let _ = address_book_updater
-            .send(MetaAddr::new_responded(book_addr, &remote_services))
+            .send(MetaAddr::new_connected(book_addr, &remote_services))
             .await;
     }

-    // Set the connection's version to the minimum of the received version or our own.
-    let negotiated_version =
-        std::cmp::min(remote.version, constants::CURRENT_NETWORK_PROTOCOL_VERSION);
-
-    // Limit containing struct size, and avoid multiple duplicates of 300+ bytes of data.
-    let connection_info = Arc::new(ConnectionInfo {
-        connected_addr,
-        remote,
-        negotiated_version,
-    });
-
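Stripped of metrics and logging, the negotiation above reduces to: reject peers below the minimum supported version, otherwise both sides speak the lower of the two protocol versions. A compact sketch with hypothetical version numbers, not real Zcash protocol constants:

```rust
// Simplified protocol version negotiation, mirroring the flow above:
// obsolete peers are rejected, otherwise use the smaller version.
fn negotiate(ours: u32, theirs: u32, min_supported: u32) -> Result<u32, String> {
    if theirs < min_supported {
        return Err(format!("obsolete peer version: {theirs}"));
    }

    Ok(ours.min(theirs))
}

fn main() {
    assert_eq!(negotiate(170_100, 170_050, 170_000), Ok(170_050));
    assert!(negotiate(170_100, 160_000, 170_000).is_err());
}
```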
     // Reconfigure the codec to use the negotiated version.
     //
     // TODO: The tokio documentation says not to do this while any frames are still being processed.
     // Since we don't know that here, another way might be to release the tcp
     // stream from the unversioned Framed wrapper and construct a new one with a versioned codec.
     let bare_codec = peer_conn.codec_mut();
-    bare_codec.reconfigure_version(negotiated_version);
+    bare_codec.reconfigure_version(connection_info.negotiated_version);

     debug!("constructing client, spawning server");
@@ -976,10 +954,10 @@ where
                 // Add a metric for outbound messages.
                 metrics::counter!(
                     "zcash.net.out.messages",
-                    1,
                     "command" => msg.command(),
                     "addr" => connected_addr.get_transient_addr_label(),
-                );
+                )
+                .increment(1);
                 // We need to use future::ready rather than an async block here,
                 // because we need the sink to be Unpin, and the With
                 // returned by .with is Unpin only if Fut is Unpin, and the
@@ -1013,10 +991,10 @@ where
                     Ok(msg) => {
                         metrics::counter!(
                             "zcash.net.in.messages",
-                            1,
                             "command" => msg.command(),
                             "addr" => connected_addr.get_transient_addr_label(),
-                        );
+                        )
+                        .increment(1);

                         // # Security
                         //
@@ -1026,10 +1004,10 @@ where
                     Err(err) => {
                         metrics::counter!(
                             "zebra.net.in.errors",
-                            1,
                             "error" => err.to_string(),
                             "addr" => connected_addr.get_transient_addr_label(),
-                        );
+                        )
+                        .increment(1);

                         // # Security
                         //
@@ -1056,6 +1034,33 @@ where
         })
         .boxed();

+    // If we've learned potential peer addresses from the inbound connection remote address
+    // or the handshake version message, add those addresses to the peer cache for this
+    // peer.
+    //
+    // # Security
+    //
+    // We can't add these alternate addresses directly to the address book. If we did,
+    // malicious peers could interfere with the address book state of other peers by
+    // providing their addresses in `Version` messages. Or they could fill the address book
+    // with fake addresses.
+    //
+    // These peer addresses are rate-limited because:
+    // - opening connections is rate-limited
+    // - these addresses are put in the peer address cache
+    // - the peer address cache is only used when Zebra requests addresses from that peer
+    let remote_canonical_addr = connection_info.remote.address_from.addr();
+    let alternate_addrs = connected_addr
+        .get_alternate_addrs(remote_canonical_addr)
+        .map(|addr| {
+            // Assume the connecting node is a server node, and it's available now.
+            MetaAddr::new_gossiped_meta_addr(
+                addr,
+                PeerServices::NODE_NETWORK,
+                DateTime32::now(),
+            )
+        });
+
     let server = Connection::new(
         inbound_service,
         server_rx,
@@ -1063,6 +1068,7 @@ where
         peer_tx,
         connection_tracker,
         connection_info.clone(),
+        alternate_addrs.collect(),
     );

     let connection_task = tokio::spawn(
@@ -1075,7 +1081,6 @@ where
     let heartbeat_task = tokio::spawn(
         send_periodic_heartbeats_with_shutdown_handle(
             connected_addr,
-            remote_services,
             shutdown_rx,
             server_tx.clone(),
             address_book_updater.clone(),
@@ -1213,7 +1218,6 @@ pub(crate) async fn register_inventory_status(

 /// Returning from this function terminates the connection's heartbeat task.
 async fn send_periodic_heartbeats_with_shutdown_handle(
     connected_addr: ConnectedAddr,
-    remote_services: PeerServices,
     shutdown_rx: oneshot::Receiver<CancelHeartbeatTask>,
     server_tx: futures::channel::mpsc::Sender<ClientRequest>,
     heartbeat_ts_collector: tokio::sync::mpsc::Sender<MetaAddrChange>,
@@ -1222,7 +1226,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle(

     let heartbeat_run_loop = send_periodic_heartbeats_run_loop(
         connected_addr,
-        remote_services,
         server_tx,
         heartbeat_ts_collector.clone(),
     );
@@ -1246,7 +1249,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle(
                 PeerError::ClientCancelledHeartbeatTask,
                 &heartbeat_ts_collector,
                 &connected_addr,
-                &remote_services,
             )
             .await
         }
@@ -1256,7 +1258,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle(
                 PeerError::ClientDropped,
                 &heartbeat_ts_collector,
                 &connected_addr,
-                &remote_services,
             )
             .await
         }
@@ -1275,7 +1276,6 @@ async fn send_periodic_heartbeats_with_shutdown_handle(
 /// See `send_periodic_heartbeats_with_shutdown_handle` for details.
 async fn send_periodic_heartbeats_run_loop(
     connected_addr: ConnectedAddr,
-    remote_services: PeerServices,
     mut server_tx: futures::channel::mpsc::Sender<ClientRequest>,
     heartbeat_ts_collector: tokio::sync::mpsc::Sender<MetaAddrChange>,
 ) -> Result<(), BoxError> {
@@ -1294,13 +1294,7 @@ async fn send_periodic_heartbeats_run_loop(
         // We've reached another heartbeat interval without
         // shutting down, so do a heartbeat request.
         let heartbeat = send_one_heartbeat(&mut server_tx);
-        heartbeat_timeout(
-            heartbeat,
-            &heartbeat_ts_collector,
-            &connected_addr,
-            &remote_services,
-        )
-        .await?;
+        heartbeat_timeout(heartbeat, &heartbeat_ts_collector, &connected_addr).await?;

         // # Security
         //
@@ -1312,7 +1306,7 @@ async fn send_periodic_heartbeats_run_loop(
             // the collector doesn't depend on network activity,
             // so this await should not hang
             let _ = heartbeat_ts_collector
-                .send(MetaAddr::new_responded(book_addr, &remote_services))
+                .send(MetaAddr::new_responded(book_addr))
                 .await;
         }
     }
@@ -1375,29 +1369,16 @@ async fn heartbeat_timeout<F, T>(
     fut: F,
     address_book_updater: &tokio::sync::mpsc::Sender<MetaAddrChange>,
     connected_addr: &ConnectedAddr,
-    remote_services: &PeerServices,
 ) -> Result<T, BoxError>
 where
     F: Future<Output = Result<T, BoxError>>,
 {
     let t = match timeout(constants::HEARTBEAT_INTERVAL, fut).await {
         Ok(inner_result) => {
-            handle_heartbeat_error(
-                inner_result,
-                address_book_updater,
-                connected_addr,
-                remote_services,
-            )
-            .await?
+            handle_heartbeat_error(inner_result, address_book_updater, connected_addr).await?
         }
         Err(elapsed) => {
-            handle_heartbeat_error(
-                Err(elapsed),
-                address_book_updater,
-                connected_addr,
-                remote_services,
-            )
-            .await?
+            handle_heartbeat_error(Err(elapsed), address_book_updater, connected_addr).await?
         }
     };

     Ok(t)
 }
@@ -1409,7 +1390,6 @@ async fn handle_heartbeat_error<T, E>(
     result: Result<T, E>,
     address_book_updater: &tokio::sync::mpsc::Sender<MetaAddrChange>,
     connected_addr: &ConnectedAddr,
-    remote_services: &PeerServices,
 ) -> Result<T, E>
 where
     E: std::fmt::Debug,
@@ -1427,7 +1407,7 @@ where
     //   - after the first error or shutdown, the peer is disconnected
     if let Some(book_addr) = connected_addr.get_address_book_addr() {
         let _ = address_book_updater
-            .send(MetaAddr::new_errored(book_addr, *remote_services))
+            .send(MetaAddr::new_errored(book_addr, None))
             .await;
     }
     Err(err)
@@ -1440,13 +1420,12 @@ async fn handle_heartbeat_shutdown(
     peer_error: PeerError,
     address_book_updater: &tokio::sync::mpsc::Sender<MetaAddrChange>,
     connected_addr: &ConnectedAddr,
-    remote_services: &PeerServices,
 ) -> Result<(), BoxError> {
     tracing::debug!(?peer_error, "client shutdown, shutting down heartbeat");

     if let Some(book_addr) = connected_addr.get_address_book_addr() {
         let _ = address_book_updater
-            .send(MetaAddr::new_shutdown(book_addr, *remote_services))
+            .send(MetaAddr::new_shutdown(book_addr))
             .await;
     }
diff --git a/zebra-network/src/peer/load_tracked_client.rs b/zebra-network/src/peer/load_tracked_client.rs
index 49d1430720a..6211e6cd52b 100644
--- a/zebra-network/src/peer/load_tracked_client.rs
+++ b/zebra-network/src/peer/load_tracked_client.rs
@@ -20,6 +20,7 @@ use crate::{
 /// A client service wrapper that keeps track of its load.
 ///
 /// It also keeps track of the peer's reported protocol version.
+#[derive(Debug)]
 pub struct LoadTrackedClient {
     /// A service representing a connected peer, wrapped in a load tracker.
     service: PeakEwma<Client>,
diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs
index f951bda5b9b..b347508916b 100644
--- a/zebra-network/src/peer_set/candidate_set.rs
+++ b/zebra-network/src/peer_set/candidate_set.rs
@@ -1,6 +1,6 @@
 //! Candidate peer selection for outbound connections using the [`CandidateSet`].

-use std::{cmp::min, sync::Arc};
+use std::{any::type_name, cmp::min, sync::Arc};

 use chrono::Utc;
 use futures::stream::{FuturesUnordered, StreamExt};
@@ -26,13 +26,12 @@ mod tests;
 ///
 /// 1. [`Responded`] peers, which we have had an outbound connection to.
 /// 2. [`NeverAttemptedGossiped`] peers, which we learned about from other peers
-///    but have never connected to.
-/// 3. [`NeverAttemptedAlternate`] peers, canonical addresses which we learned
-///    from the [`Version`] messages of inbound and outbound connections,
-///    but have never connected to.
-/// 4. [`Failed`] peers, which failed a connection attempt, or had an error
+///    but have never connected to. This includes gossiped peers, DNS seeder peers,
+///    cached peers, canonical addresses from the [`Version`] messages of inbound
+///    and outbound connections, and remote IP addresses of inbound connections.
+/// 3. [`Failed`] peers, which failed a connection attempt, or had an error
 ///    during an outbound connection.
-/// 5. [`AttemptPending`] peers, which we've recently queued for a connection.
+/// 4. [`AttemptPending`] peers, which we've recently queued for a connection.
 ///
 /// Never attempted peers are always available for connection.
/// @@ -68,8 +67,7 @@ mod tests; /// │ disjoint `PeerAddrState`s ▼ │ /// │ ┌─────────────┐ ┌─────────────────────────┐ ┌─────────────┐ │ /// │ │ `Responded` │ │`NeverAttemptedGossiped` │ │ `Failed` │ │ -/// ┌┼▶│ Peers │ │`NeverAttemptedAlternate`│ │ Peers │◀┼┐ -/// ││ │ │ │ Peers │ │ │ ││ +/// ┌┼▶│ Peers │ │ Peers │ │ Peers │◀┼┐ /// ││ └─────────────┘ └─────────────────────────┘ └─────────────┘ ││ /// ││ │ │ │ ││ /// ││ #1 oldest_first #2 newest_first #3 oldest_first ││ @@ -77,8 +75,8 @@ mod tests; /// ││ ▼ ││ /// ││ Λ ││ /// ││ ╱ ╲ filter by ││ -/// ││ ▕ ▏ is_ready_for_connection_attempt ││ -/// ││ ╲ ╱ to remove recent `Responded`, ││ +/// ││ ▕ ▏ is_ready_for_connection_attempt ││ +/// ││ ╲ ╱ to remove recent `Responded`, ││ /// ││ V `AttemptPending`, and `Failed` peers ││ /// ││ │ ││ /// ││ │ try outbound connection, ││ @@ -105,7 +103,8 @@ mod tests; /// │ │ /// │ ▼ /// │┌───────────────────────────────────────┐ -/// ││ every time we receive a peer message: │ +/// ││ when connection succeeds, and every │ +/// ││ time we receive a peer heartbeat: │ /// └│ * update state to `Responded` │ /// │ * update last_response to now() │ /// └───────────────────────────────────────┘ @@ -114,30 +113,52 @@ mod tests; /// [`Responded`]: crate::PeerAddrState::Responded /// [`Version`]: crate::protocol::external::types::Version /// [`NeverAttemptedGossiped`]: crate::PeerAddrState::NeverAttemptedGossiped -/// [`NeverAttemptedAlternate`]: crate::PeerAddrState::NeverAttemptedAlternate /// [`Failed`]: crate::PeerAddrState::Failed /// [`AttemptPending`]: crate::PeerAddrState::AttemptPending // TODO: // * show all possible transitions between Attempt/Responded/Failed, // except Failed -> Responded is invalid, must go through Attempt -// * for now, seed peers go straight to handshaking and responded, -// but we'll fix that once we add the Seed state -// When we add the Seed state: -// * show that seed peers that transition to other never attempted -// states are already in the address book +// +// Note: the CandidateSet can't be cloned, because there needs to be a single +// instance of its timers, so that rate limits are enforced correctly. pub(crate) struct CandidateSet where S: Service + Send, S::Future: Send + 'static, { - // Correctness: the address book must be private, - // so all operations are performed on a blocking thread (see #1976). + /// The outbound address book for this peer set. + /// + /// # Correctness + /// + /// The address book must be private, so all operations are performed on a blocking thread + /// (see #1976). address_book: Arc>, + + /// The peer set used to crawl the network for peers. peer_service: S, + + /// A timer that enforces a rate-limit on new outbound connections. min_next_handshake: Instant, + + /// A timer that enforces a rate-limit on peer set requests for more peers. 
min_next_crawl: Instant, } +impl std::fmt::Debug for CandidateSet +where + S: Service + Send, + S::Future: Send + 'static, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("CandidateSet") + .field("address_book", &self.address_book) + .field("peer_service", &type_name::()) + .field("min_next_handshake", &self.min_next_handshake) + .field("min_next_crawl", &self.min_next_crawl) + .finish() + } +} + impl CandidateSet where S: Service + Send, @@ -372,7 +393,8 @@ where /// Zebra resists distributed denial of service attacks by making sure that /// new peer connections are initiated at least /// [`MIN_OUTBOUND_PEER_CONNECTION_INTERVAL`][constants::MIN_OUTBOUND_PEER_CONNECTION_INTERVAL] - /// apart. + /// apart. If `next()` has recently provided a peer, then its future will sleep + /// until the rate-limit has passed. /// /// [`Responded`]: crate::PeerAddrState::Responded pub async fn next(&mut self) -> Option { @@ -414,6 +436,8 @@ where } /// Returns the address book for this `CandidateSet`. + #[cfg(any(test, feature = "proptest-impl"))] + #[allow(dead_code)] pub async fn address_book(&self) -> Arc> { self.address_book.clone() } @@ -445,10 +469,8 @@ fn validate_addrs( // TODO: // We should eventually implement these checks in this function: // - Zebra should ignore peers that are older than 3 weeks (part of #1865) - // - Zebra should count back 3 weeks from the newest peer timestamp sent - // by the other peer, to compensate for clock skew - // - Zebra should limit the number of addresses it uses from a single Addrs - // response (#1869) + // - Zebra should count back 3 weeks from the newest peer timestamp sent + // by the other peer, to compensate for clock skew let mut addrs: Vec<_> = addrs.into_iter().collect(); diff --git a/zebra-network/src/peer_set/candidate_set/tests/prop.rs b/zebra-network/src/peer_set/candidate_set/tests/prop.rs index e5201e046ba..d434e9866e2 100644 --- a/zebra-network/src/peer_set/candidate_set/tests/prop.rs +++ b/zebra-network/src/peer_set/candidate_set/tests/prop.rs @@ -21,7 +21,6 @@ use crate::{ canonical_peer_addr, constants::{DEFAULT_MAX_CONNS_PER_IP, MIN_OUTBOUND_PEER_CONNECTION_INTERVAL}, meta_addr::{MetaAddr, MetaAddrChange}, - protocol::types::PeerServices, AddressBook, BoxError, Request, Response, }; @@ -107,7 +106,7 @@ proptest! { let _guard = runtime.enter(); let peers = peers.into_iter().map(|(ip, port)| { - MetaAddr::new_alternate(canonical_peer_addr(SocketAddr::new(ip, port)), &PeerServices::NODE_NETWORK) + MetaAddr::new_initial_peer(canonical_peer_addr(SocketAddr::new(ip, port))) }).collect::>(); let peer_service = tower::service_fn(|_| async { diff --git a/zebra-network/src/peer_set/candidate_set/tests/vectors.rs b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs index 261fb2ff487..e9811796b8f 100644 --- a/zebra-network/src/peer_set/candidate_set/tests/vectors.rs +++ b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs @@ -177,7 +177,7 @@ fn candidate_set_updates_are_rate_limited() { }); } -/// Test that a call to [`CandidateSet::update`] after a call to [`CandidateSet::update_inital`] is +/// Test that a call to [`CandidateSet::update`] after a call to [`CandidateSet::update_initial`] is /// rate limited. 
#[test] fn candidate_set_update_after_update_initial_is_rate_limited() { diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 6919b2ad09c..e9ba3d38085 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -29,7 +29,6 @@ use tokio_stream::wrappers::IntervalStream; use tower::{ buffer::Buffer, discover::Change, layer::Layer, util::BoxService, Service, ServiceExt, }; -use tracing::Span; use tracing_futures::Instrument; use zebra_chain::{chain_tip::ChainTip, diagnostic::task::WaitForPanics}; @@ -197,7 +196,7 @@ where config.clone(), outbound_connector.clone(), peerset_tx.clone(), - address_book_updater, + address_book_updater.clone(), ); let initial_peers_join = tokio::spawn(initial_peers_fut.in_current_span()); @@ -242,6 +241,7 @@ where outbound_connector, peerset_tx, active_outbound_connections, + address_book_updater, ); let crawl_guard = tokio::spawn(crawl_fut.in_current_span()); @@ -740,6 +740,7 @@ enum CrawlerAction { /// /// Uses `active_outbound_connections` to limit the number of active outbound connections /// across both the initial peers and crawler. The limit is based on `config`. +#[allow(clippy::too_many_arguments)] #[instrument( skip( config, @@ -749,6 +750,7 @@ enum CrawlerAction { outbound_connector, peerset_tx, active_outbound_connections, + address_book_updater, ), fields( new_peer_interval = ?config.crawl_new_peer_interval, @@ -762,6 +764,7 @@ async fn crawl_and_dial( outbound_connector: C, peerset_tx: futures::channel::mpsc::Sender, mut active_outbound_connections: ActiveConnectionCounter, + address_book_updater: tokio::sync::mpsc::Sender, ) -> Result<(), BoxError> where C: Service< @@ -783,8 +786,6 @@ where "starting the peer address crawler", ); - let address_book = candidates.address_book().await; - // # Concurrency // // Allow tasks using the candidate set to be spawned, so they can run concurrently. @@ -812,8 +813,7 @@ where // To avoid hangs and starvation, the crawler must spawn a separate task for each crawl // and handshake, so they can make progress independently (and avoid deadlocking each other). loop { - metrics::gauge!( - "crawler.in_flight_handshakes", + metrics::gauge!("crawler.in_flight_handshakes").set( handshakes .len() .checked_sub(1) @@ -857,18 +857,22 @@ where let candidates = candidates.clone(); let outbound_connector = outbound_connector.clone(); let peerset_tx = peerset_tx.clone(); - let address_book = address_book.clone(); + let address_book_updater = address_book_updater.clone(); let demand_tx = demand_tx.clone(); // Increment the connection count before we spawn the connection. let outbound_connection_tracker = active_outbound_connections.track_connection(); - debug!( - outbound_connections = ?active_outbound_connections.update_count(), - "opening an outbound peer connection" - ); + let outbound_connections = active_outbound_connections.update_count(); + debug!(?outbound_connections, "opening an outbound peer connection"); // Spawn each handshake or crawl into an independent task, so handshakes can make // progress while crawls are running. + // + // # Concurrency + // + // The peer crawler must be able to make progress even if some handshakes are + // rate-limited. So the async mutex and next peer timeout are awaited inside the + // spawned task. let handshake_or_crawl_handle = tokio::spawn( async move { // Try to get the next available peer for a handshake. 
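The concurrency comment above is the key design point: the spawned task, not the crawler loop, awaits the candidate-set lock and the outbound rate-limit. A minimal sketch of that task structure, using a hypothetical `Candidate` type rather than the real crawler API:

```rust
use std::sync::Arc;

use futures::lock::Mutex;
use tokio::{
    task::JoinHandle,
    time::{sleep_until, Instant},
};

// Hypothetical stand-in for the real candidate type.
type Candidate = String;

fn spawn_handshake(
    candidates: Arc<Mutex<Vec<Candidate>>>,
    min_next_handshake: Instant,
) -> JoinHandle<Option<Candidate>> {
    tokio::spawn(async move {
        // The async mutex is awaited inside the task, so a slow lock holder
        // doesn't stall the crawler loop that spawned this task.
        let candidate = candidates.lock().await.pop();

        // The outbound connection rate-limit is also awaited inside the task.
        sleep_until(min_next_handshake).await;

        // A real implementation would attempt the handshake here.
        candidate
    })
}
```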
@@ -885,8 +889,9 @@ where candidate, outbound_connector, outbound_connection_tracker, + outbound_connections, peerset_tx, - address_book, + address_book_updater, demand_tx, ) .await?; @@ -896,7 +901,7 @@ where // There weren't any peers, so try to get more peers. debug!("demand for peers but no available candidates"); - crawl(candidates, demand_tx).await?; + crawl(candidates, demand_tx, false).await?; Ok(DemandCrawlFinished) } @@ -910,6 +915,7 @@ where Ok(TimerCrawl { tick }) => { let candidates = candidates.clone(); let demand_tx = demand_tx.clone(); + let should_always_dial = active_outbound_connections.update_count() == 0; let crawl_handle = tokio::spawn( async move { @@ -918,7 +924,7 @@ where "crawling for more peers in response to the crawl timer" ); - crawl(candidates, demand_tx).await?; + crawl(candidates, demand_tx, should_always_dial).await?; Ok(TimerCrawlFinished) } @@ -957,11 +963,12 @@ where } /// Try to get more peers using `candidates`, then queue a connection attempt using `demand_tx`. -/// If there were no new peers, the connection attempt is skipped. +/// If there were no new peers and `should_always_dial` is false, the connection attempt is skipped. #[instrument(skip(candidates, demand_tx))] async fn crawl( candidates: Arc>>, mut demand_tx: futures::channel::mpsc::Sender, + should_always_dial: bool, ) -> Result<(), BoxError> where S: Service + Send + Sync + 'static, @@ -976,7 +983,7 @@ where result }; let more_peers = match result { - Ok(more_peers) => more_peers, + Ok(more_peers) => more_peers.or_else(|| should_always_dial.then_some(MorePeers)), Err(e) => { info!( ?e, @@ -1017,16 +1024,18 @@ where #[instrument(skip( outbound_connector, outbound_connection_tracker, + outbound_connections, peerset_tx, - address_book, + address_book_updater, demand_tx ))] async fn dial( candidate: MetaAddr, mut outbound_connector: C, outbound_connection_tracker: ConnectionTracker, + outbound_connections: usize, mut peerset_tx: futures::channel::mpsc::Sender, - address_book: Arc>, + address_book_updater: tokio::sync::mpsc::Sender, mut demand_tx: futures::channel::mpsc::Sender, ) -> Result<(), BoxError> where @@ -1039,6 +1048,10 @@ where + 'static, C::Future: Send + 'static, { + // If Zebra only has a few connections, we log connection failures at info level, + // so users can diagnose and fix the problem. This defines the threshold for info logs. + const MAX_CONNECTIONS_FOR_INFO_LOG: usize = 5; + // # Correctness // // To avoid hangs, the dialer must only await: @@ -1067,8 +1080,14 @@ where } // The connection was never opened, or it failed the handshake and was dropped. Err(error) => { - debug!(?error, ?candidate.addr, "failed to make outbound connection to peer"); - report_failed(address_book.clone(), candidate).await; + // Silence verbose info logs in production, but keep logs if the number of connections is low. + // Also silence them completely in tests. + if outbound_connections <= MAX_CONNECTIONS_FOR_INFO_LOG && !cfg!(test) { + info!(?error, ?candidate.addr, "failed to make outbound connection to peer"); + } else { + debug!(?error, ?candidate.addr, "failed to make outbound connection to peer"); + } + report_failed(address_book_updater.clone(), candidate).await; // The demand signal that was taken out of the queue to attempt to connect to the // failed candidate never turned into a connection, so add it back. @@ -1088,25 +1107,15 @@ where Ok(()) } -/// Mark `addr` as a failed peer in `address_book`. 
-#[instrument(skip(address_book))] -async fn report_failed(address_book: Arc>, addr: MetaAddr) { - let addr = MetaAddr::new_errored(addr.addr, addr.services); +/// Mark `addr` as a failed peer to `address_book_updater`. +#[instrument(skip(address_book_updater))] +async fn report_failed( + address_book_updater: tokio::sync::mpsc::Sender, + addr: MetaAddr, +) { + // The connection info is the same as what's already in the address book. + let addr = MetaAddr::new_errored(addr.addr, None); - // # Correctness - // - // Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976). - let span = Span::current(); - let updated_addr = tokio::task::spawn_blocking(move || { - span.in_scope(|| address_book.lock().unwrap().update(addr)) - }) - .wait_for_panics() - .await; - - assert_eq!( - updated_addr.map(|addr| addr.addr()), - Some(addr.addr()), - "incorrect address updated by address book: \ - original: {addr:?}, updated: {updated_addr:?}" - ); + // Ignore send errors on Zebra shutdown. + let _ = address_book_updater.send(addr).await; } diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index c871ab43227..59bafdc771e 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -24,7 +24,6 @@ use futures::{channel::mpsc, FutureExt, StreamExt}; use indexmap::IndexSet; use tokio::{io::AsyncWriteExt, net::TcpStream, task::JoinHandle}; use tower::{service_fn, Layer, Service, ServiceExt}; -use tracing::Span; use zebra_chain::{chain_tip::NoChainTip, parameters::Network, serialization::DateTime32}; use zebra_test::net::random_known_port; @@ -1517,13 +1516,8 @@ where config.peerset_initial_target_size = peerset_initial_target_size; } - // Manually initialize an address book without a timestamp tracker. - let mut address_book = AddressBook::new( - config.listen_addr, - config.network, - config.max_connections_per_ip, - Span::current(), - ); + let (address_book, address_book_updater, _address_metrics, _address_book_updater_guard) = + AddressBookUpdater::spawn(&config, config.listen_addr); // Add enough fake peers to go over the limit, even if the limit is zero. let over_limit_peers = config.peerset_outbound_connection_limit() * 2 + 1; @@ -1540,7 +1534,10 @@ where .new_gossiped_change() .expect("created MetaAddr contains enough information to represent a gossiped address"); - address_book.update(addr); + address_book + .lock() + .expect("panic in previous thread while accessing the address book") + .update(addr); } // Create a fake peer set. @@ -1555,8 +1552,6 @@ where Ok(rsp) }); - let address_book = Arc::new(std::sync::Mutex::new(address_book)); - // Make the channels large enough to hold all the peers. let (peerset_tx, peerset_rx) = mpsc::channel::(over_limit_peers); let (mut demand_tx, demand_rx) = mpsc::channel::(over_limit_peers); @@ -1581,6 +1576,7 @@ where outbound_connector, peerset_tx, active_outbound_connections, + address_book_updater, ); let crawl_task_handle = tokio::spawn(crawl_fut); diff --git a/zebra-network/src/peer_set/inventory_registry.rs b/zebra-network/src/peer_set/inventory_registry.rs index b3de4ccee7e..0b4e01cea14 100644 --- a/zebra-network/src/peer_set/inventory_registry.rs +++ b/zebra-network/src/peer_set/inventory_registry.rs @@ -275,22 +275,30 @@ impl InventoryRegistry { self.current.iter().chain(self.prev.iter()) } - /// Returns a future that polls once for new registry updates. 
+ /// Returns a future that waits for new registry updates. #[allow(dead_code)] pub fn update(&mut self) -> Update { Update::new(self) } - /// Drive periodic inventory tasks + /// Drive periodic inventory tasks. /// - /// # Details + /// Rotates the inventory HashMaps on every timer tick. + /// Drains the inv_stream channel and registers all advertised inventory. /// - /// - rotates HashMaps based on interval events - /// - drains the inv_stream channel and registers all advertised inventory - pub fn poll_inventory(&mut self, cx: &mut Context<'_>) -> Result<(), BoxError> { + /// Returns an error if the inventory channel is closed. + /// + /// Otherwise, returns `Ok` if it performed at least one update or rotation, or `Poll::Pending` + /// if there was no inventory change. Always registers a wakeup for the next inventory update + /// or rotation, even when it returns `Ok`. + pub fn poll_inventory(&mut self, cx: &mut Context<'_>) -> Poll> { + let mut result = Poll::Pending; + // # Correctness // // Registers the current task for wakeup when the timer next becomes ready. + // (But doesn't return, because we also want to register the task for wakeup when more + // inventory arrives.) // // # Security // @@ -302,6 +310,7 @@ impl InventoryRegistry { // two interval ticks are delayed. if Pin::new(&mut self.interval).poll_next(cx).is_ready() { self.rotate(); + result = Poll::Ready(Ok(())); } // This module uses a broadcast channel instead of an mpsc channel, even @@ -320,24 +329,38 @@ impl InventoryRegistry { // rather than propagating it through the peer set's Service::poll_ready // implementation, where reporting a failure means reporting a permanent // failure of the peer set. - while let Poll::Ready(channel_result) = self.inv_stream.next().poll_unpin(cx) { - match channel_result { - Some(Ok(change)) => self.register(change), - Some(Err(BroadcastStreamRecvError::Lagged(count))) => { - metrics::counter!("pool.inventory.dropped", 1); - metrics::counter!("pool.inventory.dropped.messages", count); - // If this message happens a lot, we should improve inventory registry performance, - // or poll the registry or peer set in a separate task. + // Returns Pending if all messages are processed, but the channel could get more. + loop { + let channel_result = self.inv_stream.next().poll_unpin(cx); + + match channel_result { + Poll::Ready(Some(Ok(change))) => { + self.register(change); + result = Poll::Ready(Ok(())); + } + Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(count)))) => { + // This isn't a fatal inventory error, it's expected behaviour when Zebra is + // under load from peers. + metrics::counter!("pool.inventory.dropped").increment(1); + metrics::counter!("pool.inventory.dropped.messages").increment(count); + + // If this message happens a lot, we should improve inventory registry + // performance, or poll the registry or peer set in a separate task. info!(count, "dropped lagged inventory advertisements"); } - // This indicates all senders, including the one in the handshaker, - // have been dropped, which really is a permanent failure. - None => return Err(broadcast::error::RecvError::Closed.into()), + Poll::Ready(None) => { + // If the channel is empty and returns None, all senders, including the one in + // the handshaker, have been dropped, which really is a permanent failure. + result = Poll::Ready(Err(broadcast::error::RecvError::Closed.into())); + } + Poll::Pending => { + break; + } } } - Ok(()) + result } /// Record the given inventory `change` for the peer `addr`. 
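Callers that want to `await` the new poll-style `poll_inventory()` can wrap it in `poll_fn`, which is essentially what the reworked `Update` future in the hunks below does. A minimal sketch, assuming the `poll_inventory()` signature added above:

```rust
use futures::future::poll_fn;

// Completes when `poll_inventory()` next reports an update, rotation, or error.
async fn wait_for_inventory_change(
    registry: &mut InventoryRegistry,
) -> Result<(), BoxError> {
    poll_fn(|cx| registry.poll_inventory(cx)).await
}
```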
diff --git a/zebra-network/src/peer_set/inventory_registry/tests/prop.rs b/zebra-network/src/peer_set/inventory_registry/tests/prop.rs index 959d7b9182f..6bed4fbd729 100644 --- a/zebra-network/src/peer_set/inventory_registry/tests/prop.rs +++ b/zebra-network/src/peer_set/inventory_registry/tests/prop.rs @@ -1,7 +1,11 @@ //! Randomised property tests for the inventory registry. -use std::collections::HashSet; +use std::{ + collections::HashSet, + task::{Context, Poll}, +}; +use futures::task::noop_waker; use proptest::prelude::*; use crate::{ @@ -81,10 +85,12 @@ async fn inv_registry_inbound_wrapper_with( forwarded_msg.expect("unexpected forwarded error result"), ); - inv_registry - .update() - .await - .expect("unexpected dropped registry sender channel"); + // We don't actually care if the registry takes any action here. + let waker = noop_waker(); + let mut cx = Context::from_waker(&waker); + let _poll_pending_or_ok: Poll<()> = inv_registry + .poll_inventory(&mut cx) + .map(|result| result.expect("unexpected error polling inventory")); let test_peer = test_peer .get_transient_addr() diff --git a/zebra-network/src/peer_set/inventory_registry/update.rs b/zebra-network/src/peer_set/inventory_registry/update.rs index 9ebedc55a4b..5089d64d3bc 100644 --- a/zebra-network/src/peer_set/inventory_registry/update.rs +++ b/zebra-network/src/peer_set/inventory_registry/update.rs @@ -19,8 +19,11 @@ pub struct Update<'a> { impl Unpin for Update<'_> {} impl<'a> Update<'a> { - #[allow(dead_code)] - pub(super) fn new(registry: &'a mut InventoryRegistry) -> Self { + /// Returns a new future that returns when the next inventory update or rotation has been + /// completed by `registry`. + /// + /// See [`InventoryRegistry::poll_inventory()`] for details. + pub fn new(registry: &'a mut InventoryRegistry) -> Self { Self { registry } } } @@ -28,9 +31,10 @@ impl<'a> Update<'a> { impl Future for Update<'_> { type Output = Result<(), BoxError>; + /// A future that returns when the next inventory update or rotation has been completed. + /// + /// See [`InventoryRegistry::poll_inventory()`] for details. fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // TODO: should the future wait until new changes arrive? - // or for the rotation timer? - Poll::Ready(self.registry.poll_inventory(cx)) + self.registry.poll_inventory(cx) } } diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 2fa546c9883..722e09b6ec1 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -97,7 +97,6 @@ use std::{ collections::{HashMap, HashSet}, convert, fmt::Debug, - future::Future, marker::PhantomData, net::IpAddr, pin::Pin, @@ -107,14 +106,15 @@ use std::{ use futures::{ channel::{mpsc, oneshot}, - future::{FutureExt, TryFutureExt}, + future::{Future, FutureExt, TryFutureExt}, prelude::*, stream::FuturesUnordered, + task::noop_waker, }; use itertools::Itertools; use num_integer::div_ceil; use tokio::{ - sync::{broadcast, oneshot::error::TryRecvError, watch}, + sync::{broadcast, watch}, task::JoinHandle, }; use tower::{ @@ -191,17 +191,6 @@ where // Request Routing // - /// A preselected ready service. - /// - /// # Correctness - /// - /// If this is `Some(addr)`, `addr` must be a key for a peer in `ready_services`. - /// If that peer is removed from `ready_services`, we must set the preselected peer to `None`. - /// - /// This is handled by [`PeerSet::take_ready_service`] and - /// [`PeerSet::disconnect_from_outdated_peers`]. 
- preselected_p2c_peer: Option<D::Key>, - /// Stores gossiped inventory hashes from connected peers. /// /// Used to route inventory requests to peers that are likely to have it. @@ -265,7 +254,11 @@ where C: ChainTip, { fn drop(&mut self) { - self.shut_down_tasks_and_channels() + // We don't have access to the current task (if any), so we just drop everything we can. + let waker = noop_waker(); + let mut cx = Context::from_waker(&waker); + + self.shut_down_tasks_and_channels(&mut cx); } } @@ -310,7 +303,6 @@ where // Ready peers ready_services: HashMap::new(), // Request Routing - preselected_p2c_peer: None, inventory_registry: InventoryRegistry::new(inv_stream), // Busy peers @@ -335,52 +327,51 @@ /// Check background task handles to make sure they're still running. /// + /// Never returns `Ok`. + /// /// If any background task exits, shuts down all other background tasks, - /// and returns an error. - fn poll_background_errors(&mut self, cx: &mut Context) -> Result<(), BoxError> { - if let Some(result) = self.receive_tasks_if_needed() { - return result; - } - - match Pin::new(&mut self.guards).poll_next(cx) { - // All background tasks are still running. - Poll::Pending => Ok(()), - - Poll::Ready(Some(res)) => { + /// and returns an error. Otherwise, returns `Pending`, and registers a wakeup for + /// receiving the background tasks, or the background tasks exiting. + fn poll_background_errors(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), BoxError>> { + futures::ready!(self.receive_tasks_if_needed(cx))?; + + // Return Pending if all background tasks are still running. + match futures::ready!(Pin::new(&mut self.guards).poll_next(cx)) { + Some(res) => { info!( background_tasks = %self.guards.len(), "a peer set background task exited, shutting down other peer set tasks" ); - self.shut_down_tasks_and_channels(); + self.shut_down_tasks_and_channels(cx); - // Flatten the join result and inner result, - // then turn Ok() task exits into errors. + // Flatten the join result and inner result, and return any errors. res.map_err(Into::into) // TODO: replace with Result::flatten when it stabilises (#70142) - .and_then(convert::identity) - .and(Err("a peer set background task exited".into())) + .and_then(convert::identity)?; + + // Turn Ok() task exits into errors. + Poll::Ready(Err("a peer set background task exited".into())) } - Poll::Ready(None) => { - self.shut_down_tasks_and_channels(); - Err("all peer set background tasks have exited".into()) + None => { + self.shut_down_tasks_and_channels(cx); + Poll::Ready(Err("all peer set background tasks have exited".into())) } } } - /// Receive background tasks, if they've been sent on the channel, - /// but not consumed yet. + /// Receive background tasks, if they've been sent on the channel, but not consumed yet. /// - /// Returns a result representing the current task state, - /// or `None` if the background tasks should be polled to check their state. - fn receive_tasks_if_needed(&mut self) -> Option<Result<(), BoxError>> { + /// Returns a result representing the current task state, or `Poll::Pending` if the background + /// tasks should be polled again to check their state. + fn receive_tasks_if_needed(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), BoxError>> { if self.guards.is_empty() { - match self.handle_rx.try_recv() { - // The tasks haven't been sent yet. - Err(TryRecvError::Empty) => Some(Ok(())), + // Return Pending if the tasks have not been sent yet. + let handles = futures::ready!(Pin::new(&mut self.handle_rx).poll(cx)); - // The tasks have been sent, but not consumed. 
+ match handles { + // The tasks have been sent, but not consumed yet. Ok(handles) => { // Currently, the peer set treats an empty background task set as an error. // @@ -393,21 +384,16 @@ where self.guards.extend(handles); - None + Poll::Ready(Ok(())) } - // The tasks have been sent and consumed, but then they exited. - // - // Correctness: the peer set must receive at least one task. - // - // TODO: refactor `handle_rx` and `guards` into an enum - // for the background task state: Waiting/Running/Shutdown. - Err(TryRecvError::Closed) => { - Some(Err("all peer set background tasks have exited".into())) - } + // The sender was dropped without sending the tasks. + Err(_) => Poll::Ready(Err( + "sender did not send peer background tasks before it was dropped".into(), + )), } } else { - None + Poll::Ready(Ok(())) } } @@ -415,9 +401,8 @@ where /// - services by dropping the service lists /// - background tasks via their join handles or cancel handles /// - channels by closing the channel - fn shut_down_tasks_and_channels(&mut self) { + fn shut_down_tasks_and_channels(&mut self, cx: &mut Context<'_>) { // Drop services and cancel their background tasks. - self.preselected_p2c_peer = None; self.ready_services = HashMap::new(); for (_peer_key, handle) in self.cancel_handles.drain() { @@ -429,9 +414,9 @@ where // so we don't add more peers to a shut down peer set. self.demand_signal.close_channel(); - // Shut down background tasks. + // Shut down background tasks, ignoring pending polls. self.handle_rx.close(); - self.receive_tasks_if_needed(); + let _ = self.receive_tasks_if_needed(cx); for guard in self.guards.iter() { guard.abort(); } @@ -441,25 +426,63 @@ where /// /// Move newly ready services to the ready list if they are for peers with supported protocol /// versions, otherwise they are dropped. Also drop failed services. - fn poll_unready(&mut self, cx: &mut Context<'_>) { + /// + /// Never returns an error. + /// + /// Returns `Ok(Some(()))` if at least one peer became ready, `Poll::Pending` if there are + /// unready peers, but none became ready, and `Ok(None)` if the unready peers were empty. + /// + /// If there are any remaining unready peers, registers a wakeup for the next time one becomes + /// ready. If there are no unready peers, doesn't register any wakeups. (Since wakeups come + /// from peers, there needs to be at least one peer to register a wakeup.) + fn poll_unready(&mut self, cx: &mut Context<'_>) -> Poll<Result<Option<()>, BoxError>> { + let mut result = Poll::Pending; + + // # Correctness + // + // `poll_next()` must always be called, because `self.unready_services` could have been + // empty before the call to `self.poll_ready()`. + // + // > When new futures are added, `poll_next` must be called in order to begin receiving + // > wake-ups for new futures. + // + // <https://docs.rs/futures/latest/futures/stream/futures_unordered/struct.FuturesUnordered.html#method.poll_next> + // + // Returns Pending if we've finished processing the unready service changes, + // but there are still some unready services. loop { - match Pin::new(&mut self.unready_services).poll_next(cx) { - // No unready service changes, or empty unready services - Poll::Pending | Poll::Ready(None) => return, + // No ready peers left, but there are some unready peers pending. + let Poll::Ready(ready_peer) = Pin::new(&mut self.unready_services).poll_next(cx) else { + break; + }; + + match ready_peer { + // No unready peers in the list. + None => { + // If we've finished processing the unready service changes, and there are no + // unready services left, it doesn't make sense to return Pending, because + // their stream is terminated. 
But when we add more unready peers and call + // `poll_next()`, its termination status will be reset, and it will receive + // wakeups again. + if result.is_pending() { + result = Poll::Ready(Ok(None)); + } + + break; + } // Unready -> Ready - Poll::Ready(Some(Ok((key, svc)))) => { + Some(Ok((key, svc))) => { trace!(?key, "service became ready"); - let cancel = self.cancel_handles.remove(&key); - assert!(cancel.is_some(), "missing cancel handle"); - if svc.remote_version() >= self.minimum_peer_version.current() { - self.ready_services.insert(key, svc); - } + self.push_ready(true, key, svc); + + // Return Ok if at least one peer became ready. + result = Poll::Ready(Ok(Some(()))); } // Unready -> Canceled - Poll::Ready(Some(Err((key, UnreadyError::Canceled)))) => { + Some(Err((key, UnreadyError::Canceled))) => { // A service can be canceled because we've connected to the same service twice. // In that case, there is a cancel handle for the peer address, // but it belongs to the service for the newer connection. @@ -469,7 +492,7 @@ where "service was canceled, dropping service" ); } - Poll::Ready(Some(Err((key, UnreadyError::CancelHandleDropped(_))))) => { + Some(Err((key, UnreadyError::CancelHandleDropped(_)))) => { // Similarly, services with dropped cancel handles can have duplicates. trace!( ?key, @@ -479,7 +502,7 @@ where } // Unready -> Errored - Poll::Ready(Some(Err((key, UnreadyError::Inner(error))))) => { + Some(Err((key, UnreadyError::Inner(error)))) => { debug!(%error, "service failed while unready, dropping service"); let cancel = self.cancel_handles.remove(&key); @@ -487,6 +510,57 @@ where } } } + + result + } + + /// Checks previously ready peer services for errors. + /// + /// The only way these peer `Client`s can become unready is when we send them a request, + /// because the peer set has exclusive access to send requests to each peer. (If an inbound + /// request is in progress, it will be handled, then our request will be sent by the connection + /// task.) + /// + /// Returns `Poll::Ready` if there are some ready peers, and `Poll::Pending` if there are no + /// ready peers. Registers a wakeup if any peer has failed due to a disconnection, hang, or protocol error. + /// + /// # Panics + /// + /// If any peers somehow became unready without being sent a request. This indicates a bug in the peer set, where requests + /// are sent to peers without putting them in `unready_peers`. + fn poll_ready_peer_errors(&mut self, cx: &mut Context<'_>) -> Poll<()> { + let mut previous = HashMap::new(); + std::mem::swap(&mut previous, &mut self.ready_services); + + // TODO: consider only checking some peers each poll (for performance reasons), + // but make sure we eventually check all of them. + for (key, mut svc) in previous.drain() { + let Poll::Ready(peer_readiness) = Pin::new(&mut svc).poll_ready(cx) else { + unreachable!( + "unexpected unready peer: peers must be put into the unready_peers list \ + after sending them a request" + ); + }; + + match peer_readiness { + // Still ready, add it back to the list. + Ok(()) => self.push_ready(false, key, svc), + + // Ready -> Errored + Err(error) => { + debug!(%error, "service failed while ready, dropping service"); + + // Ready services can just be dropped, they don't need any cleanup. 
+ std::mem::drop(svc); + } + } + } + + if self.ready_services.is_empty() { + Poll::Pending + } else { + Poll::Ready(()) + } } /// Returns the number of peer connections Zebra already has with @@ -509,17 +583,35 @@ where self.ready_services.contains_key(&addr) || self.cancel_handles.contains_key(&addr) } - /// Checks for newly inserted or removed services. + /// Processes the entire list of newly inserted or removed services. /// /// Puts inserted services in the unready list. /// Drops removed services, after cancelling any pending requests. + /// + /// If the peer connector channel is closed, returns an error. + /// + /// Otherwise, returns `Ok` if it discovered at least one peer, or `Poll::Pending` if it didn't + /// discover any peers. Always registers a wakeup for new peers, even when it returns `Ok`. fn poll_discover(&mut self, cx: &mut Context<'_>) -> Poll> { - use futures::ready; + // Return pending if there are no peers in the list. + let mut result = Poll::Pending; + loop { - match ready!(Pin::new(&mut self.discover).poll_discover(cx)) + // If we've emptied the list, finish looping, otherwise process the new peer. + let Poll::Ready(discovered) = Pin::new(&mut self.discover).poll_discover(cx) else { + break; + }; + + // If the change channel has a permanent error, return that error. + let change = discovered .ok_or("discovery stream closed")? - .map_err(Into::into)? - { + .map_err(Into::into)?; + + // Otherwise we have successfully processed a peer. + result = Poll::Ready(Ok(())); + + // Process each change. + match change { Change::Remove(key) => { trace!(?key, "got Change::Remove from Discover"); self.remove(&key); @@ -552,32 +644,22 @@ where } } } + + result } /// Checks if the minimum peer version has changed, and disconnects from outdated peers. fn disconnect_from_outdated_peers(&mut self) { if let Some(minimum_version) = self.minimum_peer_version.changed() { - self.ready_services.retain(|address, peer| { - if peer.remote_version() >= minimum_version { - true - } else { - if self.preselected_p2c_peer == Some(*address) { - self.preselected_p2c_peer = None; - } - - false - } - }); + // It is ok to drop ready services, they don't need anything cancelled. + self.ready_services + .retain(|_address, peer| peer.remote_version() >= minimum_version); } } - /// Takes a ready service by key, invalidating `preselected_p2c_peer` if needed. + /// Takes a ready service by key. fn take_ready_service(&mut self, key: &D::Key) -> Option { if let Some(svc) = self.ready_services.remove(key) { - if Some(*key) == self.preselected_p2c_peer { - self.preselected_p2c_peer = None; - } - assert!( !self.cancel_handles.contains_key(key), "cancel handles are only used for unready service work" @@ -605,6 +687,25 @@ where } } + /// Adds a ready service to the ready list if it's for a peer with a supported version. + /// If `was_unready` is true, also removes the peer's cancel handle. + /// + /// If the service is for a connection to an outdated peer, the service is dropped. + fn push_ready(&mut self, was_unready: bool, key: D::Key, svc: D::Service) { + let cancel = self.cancel_handles.remove(&key); + assert_eq!( + cancel.is_some(), + was_unready, + "missing or unexpected cancel handle" + ); + + if svc.remote_version() >= self.minimum_peer_version.current() { + self.ready_services.insert(key, svc); + } else { + std::mem::drop(svc); + } + } + /// Adds a busy service to the unready list if it's for a peer with a supported version, /// and adds a cancel handle for the service's current request. 
/// @@ -631,7 +732,7 @@ where } /// Performs P2C on `self.ready_services` to randomly select a less-loaded ready service. - fn preselect_p2c_peer(&self) -> Option { + fn select_ready_p2c_peer(&self) -> Option { self.select_p2c_peer_from_list(&self.ready_services.keys().copied().collect()) } @@ -647,8 +748,7 @@ where .expect("just checked there is one service"), ), len => { - // If there are only 2 peers, randomise their order. - // Otherwise, choose 2 random peers in a random order. + // Choose 2 random peers, then return the least loaded of those 2 peers. let (a, b) = { let idxs = rand::seq::index::sample(&mut rand::thread_rng(), len, 2); let a = idxs.index(0); @@ -708,19 +808,32 @@ where /// Routes a request using P2C load-balancing. fn route_p2c(&mut self, req: Request) -> >::Future { - let preselected_key = self - .preselected_p2c_peer - .expect("ready peer service must have a preselected peer"); + if let Some(p2c_key) = self.select_ready_p2c_peer() { + tracing::trace!(?p2c_key, "routing based on p2c"); + + let mut svc = self + .take_ready_service(&p2c_key) + .expect("selected peer must be ready"); - tracing::trace!(?preselected_key, "routing based on p2c"); + let fut = svc.call(req); + self.push_unready(p2c_key, svc); + + return fut.map_err(Into::into).boxed(); + } - let mut svc = self - .take_ready_service(&preselected_key) - .expect("ready peer set must have preselected a ready peer"); + async move { + // Let other tasks run, so a retry request might get different ready peers. + tokio::task::yield_now().await; - let fut = svc.call(req); - self.push_unready(preselected_key, svc); - fut.map_err(Into::into).boxed() + // # Security + // + // Avoid routing requests to peers that are missing inventory. + // If we kept trying doomed requests, peers that are missing our requested inventory + // could take up a large amount of our bandwidth and retry limits. + Err(SharedPeerError::from(PeerError::NoReadyPeers)) + } + .map_err(Into::into) + .boxed() } /// Tries to route a request to a ready peer that advertised that inventory, @@ -967,9 +1080,9 @@ where let num_ready = self.ready_services.len(); let num_unready = self.unready_services.len(); let num_peers = num_ready + num_unready; - metrics::gauge!("pool.num_ready", num_ready as f64); - metrics::gauge!("pool.num_unready", num_unready as f64); - metrics::gauge!("zcash.net.peers", num_peers as f64); + metrics::gauge!("pool.num_ready").set(num_ready as f64); + metrics::gauge!("pool.num_unready").set(num_unready as f64); + metrics::gauge!("zcash.net.peers").set(num_peers as f64); // Security: make sure we haven't exceeded the connection limit if num_peers > self.peerset_total_connection_limit { @@ -995,78 +1108,71 @@ where Pin> + Send + 'static>>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.poll_background_errors(cx)?; + // Update service and peer statuses. + // + // # Correctness + // + // All of the futures that receive a context from this method can wake the peer set buffer + // task. If there are no ready peers, and no new peers, network requests will pause until: + // - an unready peer becomes ready, or + // - a new peer arrives. + + // Check for new peers, and register a task wakeup when the next new peers arrive. New peers + // can be infrequent if our connection slots are full, or we're connected to all + // available/useful peers. + let _poll_pending_or_ready: Poll<()> = self.poll_discover(cx)?; + + // These tasks don't provide new peers or newly ready peers. 
+ let _poll_pending: Poll<()> = self.poll_background_errors(cx)?; + let _poll_pending_or_ready: Poll<()> = self.inventory_registry.poll_inventory(cx)?; + + // Check for newly ready peers, including newly added peers (which are added as unready). + // So it needs to run after `poll_discover()`. Registers a wakeup if there are any unready + // peers. + // + // Each connected peer should become ready within a few minutes, or timeout, close the + // connection, and release its connection slot. + // + // TODO: drop peers that overload us with inbound messages and never become ready (#7822) + let _poll_pending_or_ready: Poll> = self.poll_unready(cx)?; + + // Cleanup and metrics. - // Update peer statuses - let _ = self.poll_discover(cx)?; + // Only checks the versions of ready peers, so it needs to run after `poll_unready()`. self.disconnect_from_outdated_peers(); - self.inventory_registry.poll_inventory(cx)?; - self.poll_unready(cx); + // These metrics should run last, to report the most up-to-date information. self.log_peer_set_size(); self.update_metrics(); - loop { - // Re-check that the pre-selected service is ready, in case - // something has happened since (e.g., it failed, peer closed - // connection, ...) - if let Some(key) = self.preselected_p2c_peer { - trace!(preselected_key = ?key); - let mut service = self - .take_ready_service(&key) - .expect("preselected peer must be in the ready list"); - match service.poll_ready(cx) { - Poll::Ready(Ok(())) => { - trace!("preselected service is still ready, keeping it selected"); - self.preselected_p2c_peer = Some(key); - self.ready_services.insert(key, service); - return Poll::Ready(Ok(())); - } - Poll::Pending => { - trace!("preselected service is no longer ready, moving to unready list"); - self.push_unready(key, service); - } - Poll::Ready(Err(error)) => { - trace!(%error, "preselected service failed, dropping it"); - std::mem::drop(service); - } - } - } + // Check for failures in ready peers, removing newly errored or disconnected peers. + // So it needs to run after `poll_unready()`. + let ready_peers: Poll<()> = self.poll_ready_peer_errors(cx); - trace!("preselected service was not ready, preselecting another ready service"); - self.preselected_p2c_peer = self.preselect_p2c_peer(); - self.update_metrics(); - - if self.preselected_p2c_peer.is_none() { - // CORRECTNESS - // - // If the channel is full, drop the demand signal rather than waiting. - // If we waited here, the crawler could deadlock sending a request to - // fetch more peers, because it also empties the channel. - trace!("no ready services, sending demand signal"); - let _ = self.demand_signal.try_send(MorePeers); - - // CORRECTNESS - // - // The current task must be scheduled for wakeup every time we - // return `Poll::Pending`. - // - // As long as there are unready or new peers, this task will run, - // because: - // - `poll_discover` schedules this task for wakeup when new - // peers arrive. - // - if there are unready peers, `poll_unready` schedules this - // task for wakeup when peer services become ready. - // - if the preselected peer is not ready, `service.poll_ready` - // schedules this task for wakeup when that service becomes - // ready. - // - // To avoid peers blocking on a full background error channel: - // - if no background tasks have exited since the last poll, - // `poll_background_errors` schedules this task for wakeup when - // the next task exits. 
- return Poll::Pending; - } + if ready_peers.is_pending() { + // # Correctness + // + // If the channel is full, drop the demand signal rather than waiting. If we waited + // here, the crawler could deadlock sending a request to fetch more peers, because it + // also empties the channel. + trace!("no ready services, sending demand signal"); + let _ = self.demand_signal.try_send(MorePeers); + + // # Correctness + // + // The current task must be scheduled for wakeup every time we return `Poll::Pending`. + // + // As long as there are unready or new peers, this task will run, because: + // - `poll_discover` schedules this task for wakeup when new peers arrive. + // - if there are unready peers, `poll_unready` or `poll_ready_peer_errors` schedule this + // task for wakeup when peer services become ready. + // + // To avoid peers blocking on a full peer status/error channel: + // - `poll_background_errors` schedules this task for wakeup when the peer status + // update task exits. + Poll::Pending + } else { + Poll::Ready(Ok(())) } } diff --git a/zebra-network/src/peer_set/set/tests/vectors.rs index 8290469997c..02ddc25d85e 100644 --- a/zebra-network/src/peer_set/set/tests/vectors.rs +++ b/zebra-network/src/peer_set/set/tests/vectors.rs @@ -10,7 +10,6 @@ use zebra_chain::{ parameters::{Network, NetworkUpgrade}, }; -use super::{PeerSetBuilder, PeerVersions}; use crate::{ constants::DEFAULT_MAX_CONNS_PER_IP, peer::{ClientRequest, MinimumPeerVersion}, @@ -19,6 +18,8 @@ use crate::{ Request, SharedPeerError, }; +use super::{PeerSetBuilder, PeerVersions}; + #[test] fn peer_set_ready_single_connection() { // We are going to use just one peer version in this test diff --git a/zebra-network/src/protocol/external/codec.rs index 6a4ae0585eb..88f54c6b242 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -126,8 +126,8 @@ impl Encoder<Message> for Codec { if let Some(addr_label) = self.builder.metrics_addr_label.clone() { metrics::counter!("zcash.net.out.bytes.total", - (body_length + HEADER_LEN) as u64, - "addr" => addr_label); + "addr" => addr_label) + .increment((body_length + HEADER_LEN) as u64); } use Message::*; @@ -397,7 +398,8 @@ impl Decoder for Codec { } if let Some(label) = self.builder.metrics_addr_label.clone() { - metrics::counter!("zcash.net.in.bytes.total", (body_len + HEADER_LEN) as u64, "addr" => label); + metrics::counter!("zcash.net.in.bytes.total", "addr" => label) + .increment((body_len + HEADER_LEN) as u64); } // Reserve buffer space for the expected body and the following header. diff --git a/zebra-network/src/protocol/internal/response.rs index 379517bb067..6e1cd3291ff 100644 --- a/zebra-network/src/protocol/internal/response.rs +++ b/zebra-network/src/protocol/internal/response.rs @@ -142,4 +142,10 @@ impl Response { pub fn is_inventory_download(&self) -> bool { matches!(self, Response::Blocks(_) | Response::Transactions(_)) } + + /// Returns true if self is the [`Response::Nil`] variant. 
+ #[allow(dead_code)] + pub fn is_nil(&self) -> bool { + matches!(self, Self::Nil) + } } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index dd43c4df7c5..9c820224a3b 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -19,7 +19,7 @@ default = [] # Production features that activate extra dependencies, or extra features in dependencies -# Experimental mining RPC support +# Mining RPC support getblocktemplate-rpcs = [ "zebra-chain/getblocktemplate-rpcs", ] @@ -34,8 +34,10 @@ rpc-client = [ "serde_json", ] +shielded-scan = ["tokio"] + [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.35" } # Optional dependencies @@ -43,14 +45,15 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.30" } color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl -reqwest = { version = "0.11.22", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.188", optional = true } -serde_json = { version = "1.0.107", optional = true } +reqwest = { version = "0.11.24", default-features = false, features = ["rustls-tls"], optional = true } +serde = { version = "1.0.196", optional = true } +serde_json = { version = "1.0.113", optional = true } +tokio = { version = "1.36.0", features = ["time"], optional = true } [dev-dependencies] color-eyre = "0.6.2" jsonrpc-core = "18.0.0" -reqwest = { version = "0.11.22", default-features = false, features = ["rustls-tls"] } -serde = "1.0.188" -serde_json = "1.0.107" +reqwest = { version = "0.11.24", default-features = false, features = ["rustls-tls"] } +serde = "1.0.196" +serde_json = "1.0.113" diff --git a/zebra-node-services/src/lib.rs b/zebra-node-services/src/lib.rs index f521a00301b..eb42f9319de 100644 --- a/zebra-node-services/src/lib.rs +++ b/zebra-node-services/src/lib.rs @@ -12,3 +12,6 @@ pub mod rpc_client; /// non-'static lifetimes, (e.g., when a type contains a borrow and is /// parameterized by 'a), *not* that the object itself has 'static lifetime. pub type BoxError = Box; + +#[cfg(feature = "shielded-scan")] +pub mod scan_service; diff --git a/zebra-node-services/src/scan_service.rs b/zebra-node-services/src/scan_service.rs new file mode 100644 index 00000000000..155f1f50bca --- /dev/null +++ b/zebra-node-services/src/scan_service.rs @@ -0,0 +1,4 @@ +//! Request and response types for zebra-scan tower service. + +pub mod request; +pub mod response; diff --git a/zebra-node-services/src/scan_service/request.rs b/zebra-node-services/src/scan_service/request.rs new file mode 100644 index 00000000000..1490501d2b3 --- /dev/null +++ b/zebra-node-services/src/scan_service/request.rs @@ -0,0 +1,88 @@ +//! `zebra_scan::service::ScanService` request types. 
+ +use std::collections::HashSet; + +use crate::BoxError; + +/// The maximum number of keys that may be included in a request to the scan service +const MAX_REQUEST_KEYS: usize = 1000; + +#[derive(Debug)] +/// Request types for `zebra_scan::service::ScanService` +pub enum Request { + /// Requests general info about the scanner + Info, + + /// TODO: Accept `KeyHash`es and return key hashes that are registered + CheckKeyHashes(Vec<()>), + + /// Submits viewing keys with their optional birth-heights for scanning. + RegisterKeys(Vec<(String, Option)>), + + /// Deletes viewing keys and their results from the database. + DeleteKeys(Vec), + + /// Accept keys and return transaction data + Results(Vec), + + /// Accept keys and return a channel receiver for transaction data + SubscribeResults(HashSet), + + /// Clear the results for a set of viewing keys + ClearResults(Vec), +} + +impl Request { + /// Check that the request data is valid for the request variant + pub fn check(&self) -> Result<(), BoxError> { + self.check_num_keys()?; + + Ok(()) + } + + /// Checks that requests which include keys have a valid number of keys. + fn check_num_keys(&self) -> Result<(), BoxError> { + match self { + Request::DeleteKeys(keys) | Request::ClearResults(keys) + if keys.is_empty() || keys.len() > MAX_REQUEST_KEYS => + { + Err(format!("request must include between 1 and {MAX_REQUEST_KEYS} keys").into()) + } + + _ => Ok(()), + } + } +} + +#[test] +fn test_check_num_keys() { + let fake_keys: Vec<_> = std::iter::repeat(String::new()) + .take(MAX_REQUEST_KEYS + 1) + .collect(); + + let bad_requests = [ + Request::DeleteKeys(vec![]), + Request::DeleteKeys(fake_keys.clone()), + Request::ClearResults(vec![]), + Request::ClearResults(fake_keys), + ]; + + let valid_requests = [ + Request::DeleteKeys(vec![String::new()]), + Request::ClearResults(vec![String::new()]), + ]; + + for request in bad_requests { + let error = request.check().expect_err("check should return an error"); + + assert_eq!( + format!("request must include between 1 and {MAX_REQUEST_KEYS} keys"), + error.to_string(), + "check_num_keys should return an error because there are too many keys" + ); + } + + for request in valid_requests { + request.check().expect("check should return Ok(())"); + } +} diff --git a/zebra-node-services/src/scan_service/response.rs b/zebra-node-services/src/scan_service/response.rs new file mode 100644 index 00000000000..2c22fa3a3b2 --- /dev/null +++ b/zebra-node-services/src/scan_service/response.rs @@ -0,0 +1,48 @@ +//! `zebra_scan::service::ScanService` response types. + +use std::collections::BTreeMap; + +use zebra_chain::{block::Height, transaction}; + +/// A relevant transaction for a key and the block height where it was found. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ScanResult { + /// The key that successfully decrypts the transaction + pub key: String, + + /// The height of the block with the transaction + pub height: Height, + + /// A transaction ID, which uniquely identifies mined v5 transactions, + /// and all v1-v4 transactions. + pub tx_id: transaction::Hash, +} + +#[derive(Debug)] +/// Response types for `zebra_scan::service::ScanService` +pub enum Response { + /// Response to the [`Info`](super::request::Request::Info) request + Info { + /// The minimum sapling birthday height for the shielded scanner + min_sapling_birthday_height: Height, + }, + + /// Response to [`RegisterKeys`](super::request::Request::RegisterKeys) request + /// + /// Contains the keys that the scanner accepted. 
+ RegisteredKeys(Vec), + + /// Response to [`Results`](super::request::Request::Results) request + /// + /// We use the nested `BTreeMap` so we don't repeat any piece of response data. + Results(BTreeMap>>), + + /// Response to [`DeleteKeys`](super::request::Request::DeleteKeys) request + DeletedKeys, + + /// Response to [`ClearResults`](super::request::Request::ClearResults) request + ClearedResults, + + /// Response to [`SubscribeResults`](super::request::Request::SubscribeResults) request + SubscribeResults(tokio::sync::mpsc::Receiver), +} diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 6b214c5d6fa..529a3742074 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -19,7 +19,7 @@ default = [] # Production features that activate extra dependencies, or extra features in dependencies -# Experimental mining RPC support +# Mining RPC support getblocktemplate-rpcs = [ "rand", "zcash_address", @@ -29,6 +29,9 @@ getblocktemplate-rpcs = [ "zebra-chain/getblocktemplate-rpcs", ] +# Experimental internal miner support +internal-miner = [] + # Test-only features proptest-impl = [ "proptest", @@ -39,55 +42,54 @@ proptest-impl = [ ] [dependencies] -chrono = { version = "0.4.31", default-features = false, features = ["clock", "std"] } -futures = "0.3.28" +chrono = { version = "0.4.34", default-features = false, features = ["clock", "std"] } +futures = "0.3.30" # lightwalletd sends JSON-RPC requests over HTTP 1.1 -hyper = { version = "0.14.27", features = ["http1", "server"] } +hyper = { version = "0.14.28", features = ["http1", "server"] } jsonrpc-core = "18.0.0" jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" -num_cpus = "1.16.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.107", features = ["preserve_order"] } -indexmap = { version = "2.0.1", features = ["serde"] } +serde_json = { version = "1.0.113", features = ["preserve_order"] } +indexmap = { version = "2.2.3", features = ["serde"] } -tokio = { version = "1.33.0", features = ["time", "rt-multi-thread", "macros", "tracing"] } +tokio = { version = "1.36.0", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" tracing = "0.1.39" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.188", features = ["serde_derive"] } +serde = { version = "1.0.196", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", optional = true } # ECC deps used by getblocktemplate-rpcs feature -zcash_address = { version = "0.3.0", optional = true } +zcash_address = { version = "0.3.1", optional = true } # Test-only feature proptest-impl -proptest = { version = "1.3.1", optional = true } +proptest = { version = "1.4.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.30" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.30" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.30" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } +zebra-chain = { path = 
"../zebra-chain", version = "1.0.0-beta.35", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.35" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.35" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.35" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.35" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35" } [dev-dependencies] insta = { version = "1.33.0", features = ["redactions", "json", "ron"] } -proptest = "1.3.1" +proptest = "1.4.0" -thiserror = "1.0.48" -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +thiserror = "1.0.57" +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } -zebra-consensus = { path = "../zebra-consensus", features = ["proptest-impl"] } -zebra-network = { path = "../zebra-network", features = ["proptest-impl"] } -zebra-state = { path = "../zebra-state", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.35" } diff --git a/zebra-rpc/src/config.rs b/zebra-rpc/src/config.rs index 10a0ff4dfff..3f74ead07db 100644 --- a/zebra-rpc/src/config.rs +++ b/zebra-rpc/src/config.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; pub mod mining; /// RPC configuration section. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields, default)] pub struct Config { /// IP address and port for the RPC server. @@ -34,19 +34,19 @@ pub struct Config { /// /// Zebra's RPC server has a separate thread pool and a `tokio` executor for each thread. /// State queries are run concurrently using the shared thread pool controlled by - /// the [`SyncSection.parallel_cpu_threads`](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.parallel_cpu_threads) config. + /// the [`SyncSection.parallel_cpu_threads`](https://docs.rs/zebrad/latest/zebrad/components/sync/struct.Config.html#structfield.parallel_cpu_threads) config. /// - /// We recommend setting both configs to `0` (automatic scaling) for the best performance. - /// This uses one thread per available CPU core. + /// If the number of threads is not configured or zero, Zebra uses the number of logical cores. + /// If the number of logical cores can't be detected, Zebra uses one thread. /// - /// Set to `1` by default, which runs all RPC queries on a single thread, and detects RPC - /// port conflicts from multiple Zebra or `zcashd` instances. + /// Set to `1` to run all RPC queries on a single thread, and detect RPC port conflicts from + /// multiple Zebra or `zcashd` instances. /// /// For details, see [the `jsonrpc_http_server` documentation](https://docs.rs/jsonrpc-http-server/latest/jsonrpc_http_server/struct.ServerBuilder.html#method.threads). /// /// ## Warning /// - /// Changing this config disables RPC port conflict detection. 
+ /// The default config uses multiple threads, which disables RPC port conflict detection. /// This can allow multiple Zebra instances to share the same RPC port. /// /// If some of those instances are outdated or failed, RPC queries can be slow or inconsistent. diff --git a/zebra-rpc/src/config/mining.rs b/zebra-rpc/src/config/mining.rs index 1a27baa3646..040b3a9e7a7 100644 --- a/zebra-rpc/src/config/mining.rs +++ b/zebra-rpc/src/config/mining.rs @@ -15,6 +15,9 @@ pub struct Config { /// `getblocktemplate` RPC coinbase transaction. pub miner_address: Option, + // TODO: Internal miner config code was removed as part of https://github.com/ZcashFoundation/zebra/issues/8180 + // Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-rpc/src/config/mining.rs#L18-L38 + // Restore the code when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 /// Extra data to include in coinbase transaction inputs. /// Limited to around 95 bytes by the consensus rules. /// @@ -36,6 +39,9 @@ impl Default for Config { // TODO: do we want to default to v5 transactions and Zebra coinbase data? extra_coinbase_data: None, debug_like_zcashd: true, + // TODO: Internal miner config code was removed as part of https://github.com/ZcashFoundation/zebra/issues/8180 + // Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-rpc/src/config/mining.rs#L61-L66 + // Restore the code when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 } } } @@ -48,4 +54,13 @@ impl Config { pub fn skip_getblocktemplate(&self) -> bool { !cfg!(feature = "getblocktemplate-rpcs") } + + /// Is the internal miner enabled using at least one thread? + #[cfg(feature = "internal-miner")] + pub fn is_internal_miner_enabled(&self) -> bool { + // TODO: Changed to return always false so internal miner is never started. Part of https://github.com/ZcashFoundation/zebra/issues/8180 + // Find the removed code at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-rpc/src/config/mining.rs#L83 + // Restore the code when conditions are met. https://github.com/ZcashFoundation/zebra/issues/8183 + false + } } diff --git a/zebra-rpc/src/lib.rs b/zebra-rpc/src/lib.rs index 2f5dae4a097..81a30d78e0c 100644 --- a/zebra-rpc/src/lib.rs +++ b/zebra-rpc/src/lib.rs @@ -2,7 +2,7 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_rpc")] +#![doc(html_root_url = "https://docs.rs/zebra_rpc")] pub mod config; pub mod constants; diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 2df10ca723d..9fc190a9a64 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -6,7 +6,7 @@ //! Some parts of the `zcashd` RPC documentation are outdated. //! So this implementation follows the `zcashd` server and `lightwalletd` client implementations. 
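The `parallel_cpu_threads` documentation above describes zero-means-automatic scaling, which the server code later implements with `std::thread::available_parallelism` (see the `zebra-rpc/src/server.rs` hunk below). As a minimal standalone sketch of that resolution logic, where the function name is illustrative rather than Zebra's API:

```rust
use std::thread::available_parallelism;

/// Resolve a configured thread count, where `0` means "use all logical cores".
///
/// Falls back to a single thread if the core count can't be detected.
fn resolve_parallel_cpu_threads(configured: usize) -> usize {
    if configured == 0 {
        available_parallelism().map(usize::from).unwrap_or(1)
    } else {
        configured
    }
}

fn main() {
    // With the default config value of 0, this picks the number of logical cores.
    println!("RPC threads: {}", resolve_parallel_cpu_threads(0));
}
```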
-use std::{collections::HashSet, sync::Arc}; +use std::{collections::HashSet, fmt::Debug, sync::Arc}; use chrono::Utc; use futures::{FutureExt, TryFutureExt}; @@ -15,7 +15,7 @@ use indexmap::IndexMap; use jsonrpc_core::{self, BoxFuture, Error, ErrorCode, Result}; use jsonrpc_derive::rpc; use tokio::{sync::broadcast, task::JoinHandle}; -use tower::{buffer::Buffer, Service, ServiceExt}; +use tower::{Service, ServiceExt}; use tracing::Instrument; use zebra_chain::{ @@ -226,7 +226,7 @@ pub trait Rpc { fn get_raw_transaction( &self, txid_hex: String, - verbose: u8, + verbose: Option, ) -> BoxFuture>; /// Returns the transaction ids made by the provided transparent addresses. @@ -268,19 +268,28 @@ pub trait Rpc { } /// RPC method implementations. +#[derive(Clone)] pub struct RpcImpl where Mempool: Service< - mempool::Request, - Response = mempool::Response, - Error = zebra_node_services::BoxError, - >, + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, State: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - >, - Tip: ChainTip, + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + State::Future: Send, + Tip: ChainTip + Clone + Send + Sync + 'static, { // Configuration // @@ -304,7 +313,7 @@ where // Services // /// A handle to the mempool service. - mempool: Buffer, + mempool: Mempool, /// A handle to the state service. state: State, @@ -318,13 +327,51 @@ where queue_sender: broadcast::Sender, } +impl Debug for RpcImpl +where + Mempool: Service< + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, + State: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + State::Future: Send, + Tip: ChainTip + Clone + Send + Sync + 'static, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // Skip fields without Debug impls, and skip channels + f.debug_struct("RpcImpl") + .field("build_version", &self.build_version) + .field("user_agent", &self.user_agent) + .field("network", &self.network) + .field("debug_force_finished_sync", &self.debug_force_finished_sync) + .field("debug_like_zcashd", &self.debug_like_zcashd) + .finish() + } +} + impl RpcImpl where Mempool: Service< mempool::Request, Response = mempool::Response, Error = zebra_node_services::BoxError, - > + 'static, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, State: Service< zebra_state::ReadRequest, Response = zebra_state::ReadResponse, @@ -333,6 +380,7 @@ where + Send + Sync + 'static, + State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, { /// Create a new instance of the RPC handler. 
@@ -346,15 +394,13 @@ where network: Network, debug_force_finished_sync: bool, debug_like_zcashd: bool, - mempool: Buffer, + mempool: Mempool, state: State, latest_chain_tip: Tip, ) -> (Self, JoinHandle<()>) where VersionString: ToString + Clone + Send + 'static, UserAgentString: ToString + Clone + Send + 'static, - >::Future: Send, - >::Future: Send, { let (runner, queue_sender) = Queue::start(); @@ -391,11 +437,14 @@ where impl Rpc for RpcImpl where - Mempool: tower::Service< + Mempool: Service< mempool::Request, Response = mempool::Response, Error = zebra_node_services::BoxError, - > + 'static, + > + Clone + + Send + + Sync + + 'static, Mempool::Future: Send, State: Service< zebra_state::ReadRequest, @@ -945,10 +994,11 @@ where fn get_raw_transaction( &self, txid_hex: String, - verbose: u8, + verbose: Option, ) -> BoxFuture> { let mut state = self.state.clone(); let mut mempool = self.mempool.clone(); + let verbose = verbose.unwrap_or(0); let verbose = verbose != 0; async move { @@ -1172,8 +1222,8 @@ where let subtrees = subtrees .values() .map(|subtree| SubtreeRpcData { - node: subtree.node.encode_hex(), - end: subtree.end, + root: subtree.root.encode_hex(), + end_height: subtree.end_height, }) .collect(); @@ -1202,8 +1252,8 @@ where let subtrees = subtrees .values() .map(|subtree| SubtreeRpcData { - node: subtree.node.encode_hex(), - end: subtree.end, + root: subtree.root.encode_hex(), + end_height: subtree.end_height, }) .collect(); diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index e99258ad78f..7d0550c0598 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -1,11 +1,11 @@ //! RPC methods related to mining only available with `getblocktemplate-rpcs` rust feature. -use std::{sync::Arc, time::Duration}; +use std::{fmt::Debug, sync::Arc, time::Duration}; use futures::{future::OptionFuture, FutureExt, TryFutureExt}; use jsonrpc_core::{self, BoxFuture, Error, ErrorCode, Result}; use jsonrpc_derive::rpc; -use tower::{buffer::Buffer, Service, ServiceExt}; +use tower::{Service, ServiceExt}; use zcash_address::{self, unified::Encoding, TryFromAddress}; @@ -223,6 +223,7 @@ pub trait GetBlockTemplateRpc { } /// RPC method implementations. +#[derive(Clone)] pub struct GetBlockTemplateRpcImpl< Mempool, State, @@ -232,22 +233,32 @@ pub struct GetBlockTemplateRpcImpl< AddressBook, > where Mempool: Service< - mempool::Request, - Response = mempool::Response, - Error = zebra_node_services::BoxError, - >, + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, State: Service< - zebra_state::ReadRequest, - Response = zebra_state::ReadResponse, - Error = zebra_state::BoxError, - >, + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + >::Future: Send, + Tip: ChainTip + Clone + Send + Sync + 'static, BlockVerifierRouter: Service + Clone + Send + Sync + 'static, + >::Future: Send, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, - AddressBook: AddressBookPeers, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { // Configuration // @@ -270,7 +281,7 @@ pub struct GetBlockTemplateRpcImpl< // Services // /// A handle to the mempool service. - mempool: Buffer, + mempool: Mempool, /// A handle to the state service. 
state: State, @@ -288,6 +299,48 @@ pub struct GetBlockTemplateRpcImpl< address_book: AddressBook, } +impl Debug + for GetBlockTemplateRpcImpl +where + Mempool: Service< + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, + State: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + >::Future: Send, + Tip: ChainTip + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // Skip fields without debug impls + f.debug_struct("GetBlockTemplateRpcImpl") + .field("network", &self.network) + .field("miner_address", &self.miner_address) + .field("extra_coinbase_data", &self.extra_coinbase_data) + .field("debug_like_zcashd", &self.debug_like_zcashd) + .finish() + } +} + impl GetBlockTemplateRpcImpl where @@ -295,7 +348,11 @@ where mempool::Request, Response = mempool::Response, Error = zebra_node_services::BoxError, - > + 'static, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, State: Service< zebra_state::ReadRequest, Response = zebra_state::ReadResponse, @@ -304,12 +361,14 @@ where + Send + Sync + 'static, + >::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, BlockVerifierRouter: Service + Clone + Send + Sync + 'static, + >::Future: Send, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { @@ -322,13 +381,24 @@ where pub fn new( network: Network, mining_config: crate::config::mining::Config, - mempool: Buffer, + mempool: Mempool, state: State, latest_chain_tip: Tip, block_verifier_router: BlockVerifierRouter, sync_status: SyncStatus, address_book: AddressBook, ) -> Self { + // Prevent loss of miner funds due to an unsupported or incorrect address type. + if let Some(miner_address) = mining_config.miner_address { + assert_eq!( + miner_address.network(), + network, + "incorrect miner address config: {miner_address} \ + network.network {network} and miner address network {} must match", + miner_address.network(), + ); + } + // A limit on the configured extra coinbase data, regardless of the current block height. // This is different from the consensus rule, which limits the total height + data. const EXTRA_COINBASE_DATA_LIMIT: usize = @@ -378,7 +448,10 @@ where mempool::Request, Response = mempool::Response, Error = zebra_node_services::BoxError, - > + 'static, + > + Clone + + Send + + Sync + + 'static, Mempool::Future: Send, State: Service< zebra_state::ReadRequest, @@ -473,9 +546,7 @@ where async move { get_block_template::check_parameters(¶meters)?; - let client_long_poll_id = parameters - .as_ref() - .and_then(|params| params.long_poll_id.clone()); + let client_long_poll_id = parameters.as_ref().and_then(|params| params.long_poll_id); // - One-off checks @@ -498,6 +569,10 @@ where // - add `async changed()` method to ChainSyncStatus (like `ChainTip`) check_synced_to_tip(network, latest_chain_tip.clone(), sync_status.clone())?; + // TODO: return an error if we have no peers, like `zcashd` does, + // and add a developer config that mines regardless of how many peers we have. 
+ // https://github.com/zcash/zcash/blob/6fdd9f1b81d3b228326c9826fa10696fc516444b/src/miner.cpp#L865-L880 + // We're just about to fetch state data, then maybe wait for any changes. // Mark all the changes before the fetch as seen. // Changes are also ignored in any clones made after the mark. @@ -585,7 +660,9 @@ where )); // Return immediately if the chain tip has changed. - let wait_for_best_tip_change = latest_chain_tip.best_tip_changed(); + // The clone preserves the seen status of the chain tip. + let mut wait_for_best_tip_change = latest_chain_tip.clone(); + let wait_for_best_tip_change = wait_for_best_tip_change.best_tip_changed(); // Wait for the maximum block time to elapse. This can change the block header // on testnet. (On mainnet it can happen due to a network disconnection, or a @@ -612,7 +689,6 @@ where // But the coinbase value depends on the selected transactions, so this needs // further analysis to check if it actually saves us any time. - // TODO: change logging to debug after testing tokio::select! { // Poll the futures in the listed order, for efficiency. // We put the most frequent conditions first. @@ -620,7 +696,7 @@ where // This timer elapses every few seconds _elapsed = wait_for_mempool_request => { - tracing::info!( + tracing::debug!( ?max_time, ?cur_time, ?server_long_poll_id, @@ -634,7 +710,32 @@ where tip_changed_result = wait_for_best_tip_change => { match tip_changed_result { Ok(()) => { - tracing::info!( + // Spurious updates shouldn't happen in the state, because the + // difficulty and hash ordering is a stable total order. But + // since they could cause a busy-loop, guard against them here. + latest_chain_tip.mark_best_tip_seen(); + + let new_tip_hash = latest_chain_tip.best_tip_hash(); + if new_tip_hash == Some(tip_hash) { + tracing::debug!( + ?max_time, + ?cur_time, + ?server_long_poll_id, + ?client_long_poll_id, + ?tip_hash, + ?tip_height, + "ignoring spurious state change notification" + ); + + // Wait for the mempool interval, then check for any changes. + tokio::time::sleep(Duration::from_secs( + GET_BLOCK_TEMPLATE_MEMPOOL_LONG_POLL_INTERVAL, + )).await; + + continue; + } + + tracing::debug!( ?max_time, ?cur_time, ?server_long_poll_id, @@ -644,8 +745,7 @@ where } Err(recv_error) => { - // This log should stay at info when the others go to debug, - // it will help with debugging. + // This log is rare and helps with debugging, so it's ok to be info. tracing::info!( ?recv_error, ?max_time, @@ -668,8 +768,7 @@ where // The max time does not elapse during normal operation on mainnet, // and it rarely elapses on testnet. Some(_elapsed) = wait_for_max_time => { - // This log should stay at info when the others go to debug, - // it's very rare. + // This log is very rare so it's ok to be info. tracing::info!( ?max_time, ?cur_time, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs index 617b80080c2..92ab9d00593 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs @@ -346,3 +346,21 @@ pub enum Response { /// `getblocktemplate` RPC request in proposal mode. ProposalMode(ProposalResponse), } + +impl Response { + /// Returns the inner template, if the response is in template mode. 
+ pub fn try_into_template(self) -> Option { + match self { + Response::TemplateMode(template) => Some(*template), + Response::ProposalMode(_) => None, + } + } + + /// Returns the inner proposal, if the response is in proposal mode. + pub fn try_into_proposal(self) -> Option { + match self { + Response::TemplateMode(_) => None, + Response::ProposalMode(proposal) => Some(proposal), + } + } +} diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs index 73ea1c015af..9f9a316166a 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/long_poll.rs @@ -113,7 +113,7 @@ impl LongPollInput { /// /// `zcashd` IDs are currently 69 hex/decimal digits long. /// Since Zebra's IDs are only 46 hex/decimal digits, mining pools should be able to handle them. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(try_from = "String", into = "String")] pub struct LongPollId { // Fields that invalidate old work: diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 154f5d8c973..c6c4fa69693 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -45,7 +45,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -100,7 +100,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -160,7 +160,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -228,7 +228,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -285,7 +285,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -340,7 +340,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -441,12 +441,12 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); - let send_task = tokio::spawn(rpc.get_raw_transaction(non_hex_string, 0)); + let send_task = tokio::spawn(rpc.get_raw_transaction(non_hex_string, Some(0))); mempool.expect_no_requests().await?; state.expect_no_requests().await?; @@ -500,12 +500,12 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); - let send_task = tokio::spawn(rpc.get_raw_transaction(hex::encode(random_bytes), 0)); + let send_task = tokio::spawn(rpc.get_raw_transaction(hex::encode(random_bytes), Some(0))); mempool.expect_no_requests().await?; state.expect_no_requests().await?; @@ -548,7 +548,7 @@ proptest! { network, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -599,7 +599,7 @@ proptest! { network, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), chain_tip, ); @@ -686,7 +686,7 @@ proptest! 
{ network, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), chain_tip, ); @@ -750,7 +750,7 @@ proptest! { network, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), chain_tip, ); @@ -802,7 +802,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); @@ -892,7 +892,7 @@ proptest! { Mainnet, false, true, - Buffer::new(mempool.clone(), 1), + mempool.clone(), Buffer::new(state.clone(), 1), NoChainTip, ); diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 97fe46b6ef7..2a8e9149f56 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -8,6 +8,7 @@ use std::{collections::BTreeMap, sync::Arc}; use insta::dynamic_redaction; +use tower::buffer::Buffer; use zebra_chain::{ block::Block, @@ -252,7 +253,7 @@ async fn test_rpc_response_data_for_network(network: Network) { // make the api call let get_raw_transaction = - rpc.get_raw_transaction(first_block_first_transaction.hash().encode_hex(), 0u8); + rpc.get_raw_transaction(first_block_first_transaction.hash().encode_hex(), Some(0u8)); let (response, _) = futures::join!(get_raw_transaction, mempool_req); let get_raw_transaction = response.expect("We should have a GetRawTransaction struct"); @@ -269,7 +270,7 @@ async fn test_rpc_response_data_for_network(network: Network) { // make the api call let get_raw_transaction = - rpc.get_raw_transaction(first_block_first_transaction.hash().encode_hex(), 1u8); + rpc.get_raw_transaction(first_block_first_transaction.hash().encode_hex(), Some(1u8)); let (response, _) = futures::join!(get_raw_transaction, mempool_req); let get_raw_transaction = response.expect("We should have a GetRawTransaction struct"); @@ -338,7 +339,7 @@ async fn test_mocked_rpc_response_data_for_network(network: Network) { network, false, true, - Buffer::new(mempool, 1), + mempool, state.clone(), latest_chain_tip, ); diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 258159cb3d3..e4e6dceaf21 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -86,21 +86,19 @@ pub async fn test_responses( _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init( - zebra_consensus::Config::default(), - network, - state.clone(), - true, - ) - .await; + ) = zebra_consensus::router::init(zebra_consensus::Config::default(), network, state.clone()) + .await; let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); + #[allow(clippy::unnecessary_struct_initialization)] let mining_config = crate::config::mining::Config { miner_address: Some(transparent::Address::from_script_hash(network, [0xad; 20])), extra_coinbase_data: None, debug_like_zcashd: true, + // TODO: Use default field values when optional features are enabled in tests #8183 + //..Default::default() }; // nu5 block height diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 68f08c184b1..105dd50a386 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -423,7 +423,7 @@ async fn rpc_getrawtransaction() { conventional_fee: Amount::zero(), }])); }); - let 
get_tx_req = rpc.get_raw_transaction(tx.hash().encode_hex(), 0u8); + let get_tx_req = rpc.get_raw_transaction(tx.hash().encode_hex(), Some(0u8)); let (response, _) = futures::join!(get_tx_req, mempool_req); let get_tx = response.expect("We should have a GetRawTransaction struct"); if let GetRawTransaction::Raw(raw_tx) = get_tx { @@ -454,8 +454,8 @@ async fn rpc_getrawtransaction() { let run_state_test_case = |block_idx: usize, block: Arc, tx: Arc| { let read_state = read_state.clone(); let tx_hash = tx.hash(); - let get_tx_verbose_0_req = rpc.get_raw_transaction(tx_hash.encode_hex(), 0u8); - let get_tx_verbose_1_req = rpc.get_raw_transaction(tx_hash.encode_hex(), 1u8); + let get_tx_verbose_0_req = rpc.get_raw_transaction(tx_hash.encode_hex(), Some(0u8)); + let get_tx_verbose_1_req = rpc.get_raw_transaction(tx_hash.encode_hex(), Some(1u8)); async move { let (response, _) = futures::join!(get_tx_verbose_0_req, make_mempool_req(tx_hash)); @@ -874,13 +874,8 @@ async fn rpc_getblockcount() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init( - zebra_consensus::Config::default(), - Mainnet, - state.clone(), - true, - ) - .await; + ) = zebra_consensus::router::init(zebra_consensus::Config::default(), Mainnet, state.clone()) + .await; // Init RPC let get_block_template_rpc = GetBlockTemplateRpcImpl::new( @@ -924,13 +919,8 @@ async fn rpc_getblockcount_empty_state() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init( - zebra_consensus::Config::default(), - Mainnet, - state.clone(), - true, - ) - .await; + ) = zebra_consensus::router::init(zebra_consensus::Config::default(), Mainnet, state.clone()) + .await; // Init RPC let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( @@ -976,13 +966,8 @@ async fn rpc_getpeerinfo() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init( - zebra_consensus::Config::default(), - network, - state.clone(), - true, - ) - .await; + ) = zebra_consensus::router::init(zebra_consensus::Config::default(), network, state.clone()) + .await; let mock_peer_address = zebra_network::types::MetaAddr::new_initial_peer( std::net::SocketAddr::new( @@ -1051,13 +1036,8 @@ async fn rpc_getblockhash() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init( - zebra_consensus::Config::default(), - Mainnet, - state.clone(), - true, - ) - .await; + ) = zebra_consensus::router::init(zebra_consensus::Config::default(), Mainnet, state.clone()) + .await; // Init RPC let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( @@ -1119,7 +1099,7 @@ async fn rpc_getmininginfo() { let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( Mainnet, Default::default(), - Buffer::new(MockService::build().for_unit_tests(), 1), + MockService::build().for_unit_tests(), read_state, latest_chain_tip.clone(), MockService::build().for_unit_tests(), @@ -1155,7 +1135,7 @@ async fn rpc_getnetworksolps() { let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( Mainnet, Default::default(), - Buffer::new(MockService::build().for_unit_tests(), 1), + MockService::build().for_unit_tests(), read_state, latest_chain_tip.clone(), MockService::build().for_unit_tests(), @@ -1251,10 +1231,13 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { true 
=> Some(transparent::Address::from_pub_key_hash(Mainnet, [0x7e; 20])), }; + #[allow(clippy::unnecessary_struct_initialization)] let mining_config = crate::config::mining::Config { miner_address, extra_coinbase_data: None, debug_like_zcashd: true, + // TODO: Use default field values when optional features are enabled in tests #8183 + //..Default::default() }; // nu5 block height @@ -1536,13 +1519,8 @@ async fn rpc_submitblock_errors() { _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::router::init( - zebra_consensus::Config::default(), - Mainnet, - state.clone(), - true, - ) - .await; + ) = zebra_consensus::router::init(zebra_consensus::Config::default(), Mainnet, state.clone()) + .await; // Init RPC let get_block_template_rpc = GetBlockTemplateRpcImpl::new( @@ -1600,7 +1578,7 @@ async fn rpc_validateaddress() { let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( Mainnet, Default::default(), - Buffer::new(MockService::build().for_unit_tests(), 1), + MockService::build().for_unit_tests(), MockService::build().for_unit_tests(), mock_chain_tip, MockService::build().for_unit_tests(), @@ -1645,7 +1623,7 @@ async fn rpc_z_validateaddress() { let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( Mainnet, Default::default(), - Buffer::new(MockService::build().for_unit_tests(), 1), + MockService::build().for_unit_tests(), MockService::build().for_unit_tests(), mock_chain_tip, MockService::build().for_unit_tests(), @@ -1702,10 +1680,13 @@ async fn rpc_getdifficulty() { let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); + #[allow(clippy::unnecessary_struct_initialization)] let mining_config = Config { miner_address: None, extra_coinbase_data: None, debug_like_zcashd: true, + // TODO: Use default field values when optional features are enabled in tests #8183 + //..Default::default() }; // nu5 block height @@ -1850,7 +1831,7 @@ async fn rpc_z_listunifiedreceivers() { let get_block_template_rpc = get_block_template_rpcs::GetBlockTemplateRpcImpl::new( Mainnet, Default::default(), - Buffer::new(MockService::build().for_unit_tests(), 1), + MockService::build().for_unit_tests(), MockService::build().for_unit_tests(), mock_chain_tip, MockService::build().for_unit_tests(), diff --git a/zebra-rpc/src/queue.rs b/zebra-rpc/src/queue.rs index e8bfa420da2..97662ec7be3 100644 --- a/zebra-rpc/src/queue.rs +++ b/zebra-rpc/src/queue.rs @@ -95,7 +95,7 @@ impl Queue { /// Remove a transaction from the queue. pub fn remove(&mut self, unmined_id: UnminedTxId) { - self.transactions.remove(&unmined_id); + self.transactions.swap_remove(&unmined_id); } /// Remove the oldest transaction from the queue. diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 44e6afe84f5..c8e13a9a62f 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -7,13 +7,12 @@ //! See the full list of //! 
[Differences between JSON-RPC 1.0 and 2.0.](https://www.simple-is-better.org/rpc/#differences-between-1-0-and-2-0) -use std::{fmt, panic}; +use std::{fmt, panic, thread::available_parallelism}; use jsonrpc_core::{Compatibility, MetaIoHandler}; use jsonrpc_http_server::{CloseHandle, ServerBuilder}; use tokio::task::JoinHandle; -use tower::{buffer::Buffer, Service}; - +use tower::Service; use tracing::{Instrument, *}; use zebra_chain::{ @@ -99,7 +98,7 @@ impl RpcServer { mining_config: crate::config::mining::Config, build_version: VersionString, user_agent: UserAgentString, - mempool: Buffer, + mempool: Mempool, state: State, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] block_verifier_router: BlockVerifierRouter, @@ -117,7 +116,10 @@ impl RpcServer { mempool::Request, Response = mempool::Response, Error = zebra_node_services::BoxError, - > + 'static, + > + Clone + + Send + + Sync + + 'static, Mempool::Future: Send, State: Service< zebra_state::ReadRequest, @@ -150,17 +152,6 @@ impl RpcServer { #[cfg(feature = "getblocktemplate-rpcs")] { - // Prevent loss of miner funds due to an unsupported or incorrect address type. - if let Some(miner_address) = mining_config.miner_address { - assert_eq!( - miner_address.network(), - network, - "incorrect miner address config: {miner_address} \ - network.network {network} and miner address network {} must match", - miner_address.network(), - ); - } - // Initialize the getblocktemplate rpc method handler let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new( network, @@ -196,7 +187,7 @@ impl RpcServer { // If zero, automatically scale threads to the number of CPU cores let mut parallel_cpu_threads = config.parallel_cpu_threads; if parallel_cpu_threads == 0 { - parallel_cpu_threads = num_cpus::get(); + parallel_cpu_threads = available_parallelism().map(usize::from).unwrap_or(1); } // The server is a blocking task, which blocks on executor shutdown. 
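The `Queue::remove` change above swaps `IndexMap::remove` for `swap_remove`, the explicit O(1) variant preferred by indexmap 2.x. The trade-off is that the last entry is swapped into the removed slot, so the relative order of the remaining entries can change. A small sketch with made-up keys:

```rust
use indexmap::IndexMap;

fn main() {
    let mut queue: IndexMap<&str, u32> = IndexMap::new();
    queue.insert("tx-a", 1);
    queue.insert("tx-b", 2);
    queue.insert("tx-c", 3);

    // `swap_remove` is O(1): it swaps the removed entry with the last entry,
    // then pops, so "tx-c" moves into the slot that "tx-a" occupied.
    queue.swap_remove("tx-a");

    assert_eq!(queue.keys().copied().collect::<Vec<_>>(), ["tx-c", "tx-b"]);
}
```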
diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml new file mode 100644 index 00000000000..f4955153011 --- /dev/null +++ b/zebra-scan/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "zebra-scan" +version = "0.1.0-alpha.4" +authors = ["Zcash Foundation "] +description = "Shielded transaction scanner for the Zcash blockchain" +license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" +edition = "2021" + +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["cryptography::cryptocurrencies"] + +[[bin]] # Bin to run the Scanner gRPC server +name = "scanner-grpc-server" +path = "src/bin/rpc_server.rs" +required-features = ["proptest-impl"] + +[features] + +# Production features that activate extra dependencies, or extra features in dependencies + +# Test features + +proptest-impl = [ + "proptest", + "proptest-derive", + "zebra-state/proptest-impl", + "zebra-chain/proptest-impl", + "zebra-test", + "bls12_381", + "ff", + "group", + "jubjub", + "rand", + "zcash_note_encryption", +] + +[dependencies] + +color-eyre = "0.6.2" +indexmap = { version = "2.2.3", features = ["serde"] } +itertools = "0.12.1" +semver = "1.0.22" +serde = { version = "1.0.196", features = ["serde_derive"] } +tokio = { version = "1.36.0", features = ["time"] } +tower = "0.4.13" +tracing = "0.1.39" +futures = "0.3.30" + +zcash_client_backend = "0.10.0-rc.1" +zcash_primitives = "0.13.0-rc.1" + +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35", features = ["shielded-scan"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35", features = ["shielded-scan"] } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.35", features = ["shielded-scan"] } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.2" } + +chrono = { version = "0.4.34", default-features = false, features = ["clock", "std", "serde"] } + +# test feature proptest-impl +proptest = { version = "1.4.0", optional = true } +proptest-derive = { version = "0.4.0", optional = true } + +bls12_381 = { version = "0.8.0", optional = true } +ff = { version = "0.13.0", optional = true } +group = { version = "0.13.0", optional = true } +jubjub = { version = "0.10.0", optional = true } +rand = { version = "0.8.5", optional = true } +zcash_note_encryption = { version = "0.4.0", optional = true } + +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.35", optional = true } + +[dev-dependencies] +insta = { version = "1.33.0", features = ["ron", "redactions"] } +tokio = { version = "1.36.0", features = ["test-util"] } + +proptest = "1.4.0" +proptest-derive = "0.4.0" +bls12_381 = "0.8.0" +ff = "0.13.0" +group = "0.13.0" +jubjub = "0.10.0" +rand = "0.8.5" +zcash_note_encryption = "0.4.0" + +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.35" } diff --git a/zebra-scan/src/bin/rpc_server.rs b/zebra-scan/src/bin/rpc_server.rs new file mode 100644 index 00000000000..1463df3f8f0 --- /dev/null +++ b/zebra-scan/src/bin/rpc_server.rs @@ -0,0 +1,20 @@ +//! 
Runs an RPC server with a mock ScanTask
+
+use tower::ServiceBuilder;
+
+use zebra_scan::{service::ScanService, storage::Storage};
+
+#[tokio::main]
+/// Runs an RPC server with a mock ScanTask
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let (config, network) = Default::default();
+
+    let (scan_service, _cmd_receiver) =
+        ScanService::new_with_mock_scanner(Storage::new(&config, network, false));
+    let scan_service = ServiceBuilder::new().buffer(10).service(scan_service);
+
+    // Start the gRPC server.
+    zebra_grpc::server::init("127.0.0.1:8231".parse()?, scan_service).await?;
+
+    Ok(())
+}
diff --git a/zebra-scan/src/config.rs b/zebra-scan/src/config.rs
new file mode 100644
index 00000000000..915a5e32342
--- /dev/null
+++ b/zebra-scan/src/config.rs
@@ -0,0 +1,84 @@
+//! Configuration for blockchain scanning tasks.
+
+use std::{fmt::Debug, net::SocketAddr};
+
+use indexmap::IndexMap;
+use serde::{Deserialize, Serialize};
+
+use zebra_state::Config as DbConfig;
+
+use crate::storage::SaplingScanningKey;
+
+#[derive(Clone, Eq, PartialEq, Deserialize, Serialize)]
+#[serde(deny_unknown_fields, default)]
+/// Configuration for scanning.
+pub struct Config {
+    /// The sapling keys to scan for and the birthday height of each of them.
+    ///
+    /// Currently only supports Extended Full Viewing Keys in ZIP-32 format.
+    //
+    // TODO: allow keys without birthdays
+    pub sapling_keys_to_scan: IndexMap<SaplingScanningKey, u32>,
+
+    /// IP address and port for the zebra-scan gRPC server.
+    ///
+    /// Note: The gRPC server is disabled by default.
+    /// To enable the gRPC server, set a listen address in the config:
+    /// ```toml
+    /// [shielded-scan]
+    /// listen_addr = '127.0.0.1:8231'
+    /// ```
+    ///
+    /// The recommended ports for the gRPC server are:
+    /// - Mainnet: 127.0.0.1:8231
+    /// - Testnet: 127.0.0.1:18231
+    pub listen_addr: Option<SocketAddr>,
+
+    /// The scanner results database config.
+    //
+    // TODO: Remove fields that are only used by the state, and create a common database config.
+    #[serde(flatten)]
+    db_config: DbConfig,
+}
+
+impl Debug for Config {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("Config")
+            // Security: don't log private keys, birthday heights might also be private
+            .field("sapling_keys_to_scan", &self.sapling_keys_to_scan.len())
+            .field("db_config", &self.db_config)
+            .finish()
+    }
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            sapling_keys_to_scan: IndexMap::new(),
+            listen_addr: None,
+
+            // TODO: Add a const generic for specifying the default cache_dir path, like 'zebra' or 'zebra-scan'?
+            db_config: DbConfig::default(),
+        }
+    }
+}
+
+impl Config {
+    /// Returns a config for a temporary database that is deleted when it is dropped.
+    pub fn ephemeral() -> Self {
+        Self {
+            db_config: DbConfig::ephemeral(),
+            ..Self::default()
+        }
+    }
+
+    /// Returns the database-specific config.
+    pub fn db_config(&self) -> &DbConfig {
+        &self.db_config
+    }
+
+    /// Returns the database-specific config as mutable.
+    pub fn db_config_mut(&mut self) -> &mut DbConfig {
+        &mut self.db_config
+    }
+}
diff --git a/zebra-scan/src/init.rs b/zebra-scan/src/init.rs
new file mode 100644
index 00000000000..fe001684492
--- /dev/null
+++ b/zebra-scan/src/init.rs
@@ -0,0 +1,66 @@
+//! Initializing the scanner and gRPC server.
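Before the initialization code, a quick sketch of how the scanner `Config` defined above behaves in tests; `ephemeral()` and the public fields come from the config module just shown:

```rust
use zebra_scan::Config;

fn main() {
    // A throwaway scanner config: the results database is deleted on drop,
    // and the gRPC server stays disabled because `listen_addr` is `None`.
    let config = Config::ephemeral();

    assert!(config.listen_addr.is_none());
    assert!(config.sapling_keys_to_scan.is_empty());
}
```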
+ +use std::net::SocketAddr; + +use color_eyre::Report; +use tokio::task::JoinHandle; +use tower::ServiceBuilder; + +use tracing::Instrument; +use zebra_chain::{diagnostic::task::WaitForPanics, parameters::Network}; +use zebra_state::ChainTipChange; + +use crate::{scan, service::ScanService, storage::Storage, Config}; + +/// Initialize [`ScanService`] based on its config. +/// +/// TODO: add a test for this function. +pub async fn init_with_server( + listen_addr: SocketAddr, + config: Config, + network: Network, + state: scan::State, + chain_tip_change: ChainTipChange, +) -> Result<(), Report> { + info!(?config, "starting scan service"); + let scan_service = ServiceBuilder::new() + .buffer(10) + .service(ScanService::new(&config, network, state, chain_tip_change).await); + + // TODO: move this to zebra-grpc init() function and include addr + info!(?listen_addr, "starting scan gRPC server"); + + // Start the gRPC server. + zebra_grpc::server::init(listen_addr, scan_service).await?; + + Ok(()) +} + +/// Initialize the scanner and its gRPC server based on its config, and spawn a task for it. +pub fn spawn_init( + config: Config, + network: Network, + state: scan::State, + chain_tip_change: ChainTipChange, +) -> JoinHandle> { + if let Some(listen_addr) = config.listen_addr { + // TODO: spawn an entirely new executor here, to avoid timing attacks. + tokio::spawn( + init_with_server(listen_addr, config, network, state, chain_tip_change) + .in_current_span(), + ) + } else { + // TODO: spawn an entirely new executor here, to avoid timing attacks. + tokio::spawn( + async move { + let storage = + tokio::task::spawn_blocking(move || Storage::new(&config, network, false)) + .wait_for_panics() + .await; + let (_cmd_sender, cmd_receiver) = tokio::sync::mpsc::channel(1); + scan::start(state, chain_tip_change, storage, cmd_receiver).await + } + .in_current_span(), + ) + } +} diff --git a/zebra-scan/src/lib.rs b/zebra-scan/src/lib.rs new file mode 100644 index 00000000000..e1cf2d93a85 --- /dev/null +++ b/zebra-scan/src/lib.rs @@ -0,0 +1,26 @@ +//! Shielded transaction scanner for the Zcash blockchain. + +#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] +#![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] +#![doc(html_root_url = "https://docs.rs/zebra_scan")] + +#[macro_use] +extern crate tracing; + +pub mod config; +pub mod init; +pub mod storage; + +use zebra_node_services::scan_service::{request::Request, response::Response}; + +pub mod service; + +pub use service::scan_task::scan; + +#[cfg(any(test, feature = "proptest-impl"))] +pub mod tests; + +pub use config::Config; +pub use init::{init_with_server, spawn_init}; + +pub use zcash_primitives::{sapling::SaplingIvk, zip32::DiversifiableFullViewingKey}; diff --git a/zebra-scan/src/service.rs b/zebra-scan/src/service.rs new file mode 100644 index 00000000000..5c5aeeae1de --- /dev/null +++ b/zebra-scan/src/service.rs @@ -0,0 +1,197 @@ +//! [`tower::Service`] for zebra-scan. 
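The service below is driven through the standard `tower::Service` interface, so callers poll for readiness before sending a request. A rough sketch of a caller, assuming the hypothetical helper name `scanner_info` and any service with the right request and response types (such as `ScanService` itself, or a `Buffer` around it):

```rust
use tower::{Service, ServiceExt};
use zebra_node_services::scan_service::{request::Request, response::Response};

/// Hypothetical helper: wait until the scan service is ready, then ask it for info.
async fn scanner_info<S>(mut scan_service: S) -> Result<(), S::Error>
where
    S: Service<Request, Response = Response>,
{
    // `ready()` applies backpressure before the request is sent.
    let response = scan_service.ready().await?.call(Request::Info).await?;

    if let Response::Info {
        min_sapling_birthday_height,
    } = response
    {
        println!("scanner birthday floor: {min_sapling_birthday_height:?}");
    }

    Ok(())
}
```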
+ +use std::{collections::BTreeMap, future::Future, pin::Pin, task::Poll, time::Duration}; + +use futures::future::FutureExt; +use tower::Service; + +use zebra_chain::{diagnostic::task::WaitForPanics, parameters::Network, transaction::Hash}; + +use zebra_state::ChainTipChange; + +use crate::{scan, storage::Storage, Config, Request, Response}; + +#[cfg(test)] +mod tests; + +pub mod scan_task; + +pub use scan_task::{ScanTask, ScanTaskCommand}; + +#[cfg(any(test, feature = "proptest-impl"))] +use tokio::sync::mpsc::Receiver; + +/// Zebra-scan [`tower::Service`] +#[derive(Debug)] +pub struct ScanService { + /// On-disk storage + pub db: Storage, + + /// Handle to scan task that's responsible for writing results + scan_task: ScanTask, +} + +/// A timeout applied to `DeleteKeys` requests. +const DELETE_KEY_TIMEOUT: Duration = Duration::from_secs(15); + +impl ScanService { + /// Create a new [`ScanService`]. + pub async fn new( + config: &Config, + network: Network, + state: scan::State, + chain_tip_change: ChainTipChange, + ) -> Self { + let config = config.clone(); + let storage = tokio::task::spawn_blocking(move || Storage::new(&config, network, false)) + .wait_for_panics() + .await; + + Self { + scan_task: ScanTask::spawn(storage.clone(), state, chain_tip_change), + db: storage, + } + } + + /// Create a new [`ScanService`] with a mock `ScanTask` + // TODO: Move this to tests behind `cfg(any(test, feature = "proptest-impl"))` + #[cfg(any(test, feature = "proptest-impl"))] + pub fn new_with_mock_scanner(db: Storage) -> (Self, Receiver) { + let (scan_task, cmd_receiver) = ScanTask::mock(); + (Self { db, scan_task }, cmd_receiver) + } +} + +impl Service for ScanService { + type Response = Response; + type Error = Box; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready(&mut self, _cx: &mut std::task::Context<'_>) -> Poll> { + // TODO: If scan task returns an error, add error to the panic message + assert!( + !self.scan_task.handle.is_finished(), + "scan task finished unexpectedly" + ); + + self.db.check_for_panics(); + + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + if let Err(error) = req.check() { + return async move { Err(error) }.boxed(); + } + + match req { + Request::Info => { + let db = self.db.clone(); + + return async move { + Ok(Response::Info { + min_sapling_birthday_height: db.network().sapling_activation_height(), + }) + } + .boxed(); + } + + Request::CheckKeyHashes(_key_hashes) => { + // TODO: check that these entries exist in db + } + + Request::RegisterKeys(keys) => { + let mut scan_task = self.scan_task.clone(); + + return async move { + Ok(Response::RegisteredKeys( + scan_task.register_keys(keys)?.await?, + )) + } + .boxed(); + } + + Request::DeleteKeys(keys) => { + let mut db = self.db.clone(); + let mut scan_task = self.scan_task.clone(); + + return async move { + // Wait for a message to confirm that the scan task has removed the key up to `DELETE_KEY_TIMEOUT` + let remove_keys_result = + tokio::time::timeout(DELETE_KEY_TIMEOUT, scan_task.remove_keys(&keys)?) + .await + .map_err(|_| "timeout waiting for delete keys done notification"); + + // Delete the key from the database after either confirmation that it's been removed from the scan task, or + // waiting `DELETE_KEY_TIMEOUT`. + let delete_key_task = tokio::task::spawn_blocking(move || { + db.delete_sapling_keys(keys); + }); + + // Return timeout errors or `RecvError`s, or wait for the key to be deleted from the database. 
+ remove_keys_result??; + delete_key_task.await?; + + Ok(Response::DeletedKeys) + } + .boxed(); + } + + Request::Results(keys) => { + let db = self.db.clone(); + + return async move { + let mut final_result = BTreeMap::new(); + for key in keys { + let db = db.clone(); + let mut heights_and_transactions = BTreeMap::new(); + let txs = { + let key = key.clone(); + tokio::task::spawn_blocking(move || db.sapling_results_for_key(&key)) + } + .await?; + txs.iter().for_each(|(k, v)| { + heights_and_transactions + .entry(*k) + .or_insert_with(Vec::new) + .extend(v.iter().map(|x| Hash::from(*x))); + }); + final_result.entry(key).or_insert(heights_and_transactions); + } + + Ok(Response::Results(final_result)) + } + .boxed(); + } + + Request::SubscribeResults(keys) => { + let mut scan_task = self.scan_task.clone(); + + return async move { + let results_receiver = scan_task.subscribe(keys).await?; + + Ok(Response::SubscribeResults(results_receiver)) + } + .boxed(); + } + + Request::ClearResults(keys) => { + let mut db = self.db.clone(); + + return async move { + // Clear results from db for the provided `keys` + tokio::task::spawn_blocking(move || { + db.delete_sapling_results(keys); + }) + .await?; + + Ok(Response::ClearedResults) + } + .boxed(); + } + } + + async move { Ok(Response::Results(BTreeMap::new())) }.boxed() + } +} diff --git a/zebra-scan/src/service/scan_task.rs b/zebra-scan/src/service/scan_task.rs new file mode 100644 index 00000000000..ebdfa70c315 --- /dev/null +++ b/zebra-scan/src/service/scan_task.rs @@ -0,0 +1,45 @@ +//! Types and method implementations for [`ScanTask`] + +use std::sync::Arc; + +use color_eyre::Report; +use tokio::task::JoinHandle; + +use zebra_state::ChainTipChange; + +use crate::storage::Storage; + +mod commands; +mod executor; +pub mod scan; + +pub use commands::ScanTaskCommand; + +#[cfg(any(test, feature = "proptest-impl"))] +pub mod tests; + +#[derive(Debug, Clone)] +/// Scan task handle and command channel sender +pub struct ScanTask { + /// [`JoinHandle`] of scan task + pub handle: Arc>>, + + /// Task command channel sender + pub cmd_sender: tokio::sync::mpsc::Sender, +} + +/// The size of the command channel buffer +const SCAN_TASK_BUFFER_SIZE: usize = 100; + +impl ScanTask { + /// Spawns a new [`ScanTask`]. + pub fn spawn(db: Storage, state: scan::State, chain_tip_change: ChainTipChange) -> Self { + // TODO: Use a bounded channel or move this logic to the scan service or another service. + let (cmd_sender, cmd_receiver) = tokio::sync::mpsc::channel(SCAN_TASK_BUFFER_SIZE); + + Self { + handle: Arc::new(scan::spawn_init(db, state, chain_tip_change, cmd_receiver)), + cmd_sender, + } + } +} diff --git a/zebra-scan/src/service/scan_task/commands.rs b/zebra-scan/src/service/scan_task/commands.rs new file mode 100644 index 00000000000..5a210c1ec52 --- /dev/null +++ b/zebra-scan/src/service/scan_task/commands.rs @@ -0,0 +1,223 @@ +//! 
Types and method implementations for [`ScanTaskCommand`] + +use std::collections::{HashMap, HashSet}; + +use color_eyre::{eyre::eyre, Report}; +use tokio::sync::{ + mpsc::{error::TrySendError, Receiver, Sender}, + oneshot, +}; + +use tower::BoxError; +use zcash_primitives::{sapling::SaplingIvk, zip32::DiversifiableFullViewingKey}; +use zebra_chain::{block::Height, parameters::Network}; +use zebra_node_services::scan_service::response::ScanResult; +use zebra_state::SaplingScanningKey; + +use crate::scan::sapling_key_to_scan_block_keys; + +use super::ScanTask; + +const RESULTS_SENDER_BUFFER_SIZE: usize = 100; + +#[derive(Debug)] +/// Commands that can be sent to [`ScanTask`] +pub enum ScanTaskCommand { + /// Start scanning for new viewing keys + RegisterKeys { + /// New keys to start scanning for + keys: Vec<(String, Option)>, + /// Returns the set of keys the scanner accepted. + rsp_tx: oneshot::Sender>, + }, + + /// Stop scanning for deleted viewing keys + RemoveKeys { + /// Notify the caller once the key is removed (so the caller can wait before clearing results) + done_tx: oneshot::Sender<()>, + + /// Key hashes that are to be removed + keys: Vec, + }, + + /// Start sending results for key hashes to `result_sender` + SubscribeResults { + /// Key hashes to send the results of to result channel + keys: HashSet, + + /// Returns the result receiver once the subscribed keys have been added. + rsp_tx: oneshot::Sender>, + }, +} + +impl ScanTask { + /// Accepts the scan task's `parsed_key` collection and a reference to the command channel receiver + /// + /// Processes messages in the scan task channel, updating `parsed_keys` if required. + /// + /// Returns newly registered keys for scanning. + pub fn process_messages( + cmd_receiver: &mut tokio::sync::mpsc::Receiver, + registered_keys: &mut HashMap< + SaplingScanningKey, + (Vec, Vec), + >, + network: Network, + ) -> Result< + ( + HashMap< + SaplingScanningKey, + (Vec, Vec, Height), + >, + HashMap>, + Vec<(Receiver, oneshot::Sender>)>, + ), + Report, + > { + use tokio::sync::mpsc::error::TryRecvError; + + let mut new_keys = HashMap::new(); + let mut new_result_senders = HashMap::new(); + let mut new_result_receivers = Vec::new(); + let sapling_activation_height = network.sapling_activation_height(); + + loop { + let cmd = match cmd_receiver.try_recv() { + Ok(cmd) => cmd, + + Err(TryRecvError::Empty) => break, + Err(TryRecvError::Disconnected) => { + // Return early if the sender has been dropped. + return Err(eyre!("command channel disconnected")); + } + }; + + match cmd { + ScanTaskCommand::RegisterKeys { keys, rsp_tx } => { + // Determine what keys we pass to the scanner. + let keys: Vec<_> = keys + .into_iter() + .filter_map(|key| { + // Don't accept keys that: + // 1. the scanner already has, and + // 2. were already submitted. + if registered_keys.contains_key(&key.0) + && !new_keys.contains_key(&key.0) + { + return None; + } + + let birth_height = if let Some(height) = key.1 { + match Height::try_from(height) { + Ok(height) => height, + // Don't accept the key if its birth height is not a valid height. + Err(_) => return None, + } + } else { + // Use the Sapling activation height if the key has no birth height. + sapling_activation_height + }; + + sapling_key_to_scan_block_keys(&key.0, network) + .ok() + .map(|parsed| (key.0, (parsed.0, parsed.1, birth_height))) + }) + .collect(); + + // Send the accepted keys back. 
+ let _ = rsp_tx.send(keys.iter().map(|key| key.0.clone()).collect()); + + new_keys.extend(keys.clone()); + + registered_keys.extend( + keys.into_iter() + .map(|(key, (dfvks, ivks, _))| (key, (dfvks, ivks))), + ); + } + + ScanTaskCommand::RemoveKeys { done_tx, keys } => { + for key in keys { + registered_keys.remove(&key); + new_keys.remove(&key); + } + + // Ignore send errors for the done notification, caller is expected to use a timeout. + let _ = done_tx.send(()); + } + + ScanTaskCommand::SubscribeResults { rsp_tx, keys } => { + let keys: Vec<_> = keys + .into_iter() + .filter(|key| registered_keys.contains_key(key)) + .collect(); + + if keys.is_empty() { + continue; + } + + let (result_sender, result_receiver) = + tokio::sync::mpsc::channel(RESULTS_SENDER_BUFFER_SIZE); + + new_result_receivers.push((result_receiver, rsp_tx)); + + for key in keys { + new_result_senders.insert(key, result_sender.clone()); + } + } + } + } + + Ok((new_keys, new_result_senders, new_result_receivers)) + } + + /// Sends a command to the scan task + pub fn send( + &mut self, + command: ScanTaskCommand, + ) -> Result<(), tokio::sync::mpsc::error::TrySendError> { + self.cmd_sender.try_send(command) + } + + /// Sends a message to the scan task to remove the provided viewing keys. + /// + /// Returns a oneshot channel receiver to notify the caller when the keys have been removed. + pub fn remove_keys( + &mut self, + keys: &[String], + ) -> Result, TrySendError> { + let (done_tx, done_rx) = oneshot::channel(); + + self.send(ScanTaskCommand::RemoveKeys { + keys: keys.to_vec(), + done_tx, + })?; + + Ok(done_rx) + } + + /// Sends a message to the scan task to start scanning for the provided viewing keys. + pub fn register_keys( + &mut self, + keys: Vec<(String, Option)>, + ) -> Result>, TrySendError> { + let (rsp_tx, rsp_rx) = oneshot::channel(); + + self.send(ScanTaskCommand::RegisterKeys { keys, rsp_tx })?; + + Ok(rsp_rx) + } + + /// Sends a message to the scan task to start sending the results for the provided viewing keys to a channel. + /// + /// Returns the channel receiver. + pub async fn subscribe( + &mut self, + keys: HashSet, + ) -> Result, BoxError> { + let (rsp_tx, rsp_rx) = oneshot::channel(); + + self.send(ScanTaskCommand::SubscribeResults { keys, rsp_tx })?; + + Ok(rsp_rx.await?) + } +} diff --git a/zebra-scan/src/service/scan_task/executor.rs b/zebra-scan/src/service/scan_task/executor.rs new file mode 100644 index 00000000000..eb6ecca466c --- /dev/null +++ b/zebra-scan/src/service/scan_task/executor.rs @@ -0,0 +1,58 @@ +//! 
diff --git a/zebra-scan/src/service/scan_task/executor.rs b/zebra-scan/src/service/scan_task/executor.rs
new file mode 100644
index 00000000000..eb6ecca466c
--- /dev/null
+++ b/zebra-scan/src/service/scan_task/executor.rs
@@ -0,0 +1,58 @@
+//! The scan task executor
+
+use std::{collections::HashMap, sync::Arc};
+
+use color_eyre::eyre::Report;
+use futures::{stream::FuturesUnordered, FutureExt, StreamExt};
+use tokio::{
+    sync::{
+        mpsc::{Receiver, Sender},
+        watch,
+    },
+    task::JoinHandle,
+};
+use tracing::Instrument;
+use zebra_node_services::scan_service::response::ScanResult;
+use zebra_state::SaplingScanningKey;
+
+use super::scan::ScanRangeTaskBuilder;
+
+const EXECUTOR_BUFFER_SIZE: usize = 100;
+
+pub fn spawn_init(
+    subscribed_keys_receiver: watch::Receiver<Arc<HashMap<SaplingScanningKey, Sender<ScanResult>>>>,
+) -> (Sender<ScanRangeTaskBuilder>, JoinHandle<Result<(), Report>>) {
+    let (scan_task_sender, scan_task_receiver) = tokio::sync::mpsc::channel(EXECUTOR_BUFFER_SIZE);
+
+    (
+        scan_task_sender,
+        tokio::spawn(
+            scan_task_executor(scan_task_receiver, subscribed_keys_receiver).in_current_span(),
+        ),
+    )
+}
+
+pub async fn scan_task_executor(
+    mut scan_task_receiver: Receiver<ScanRangeTaskBuilder>,
+    subscribed_keys_receiver: watch::Receiver<Arc<HashMap<SaplingScanningKey, Sender<ScanResult>>>>,
+) -> Result<(), Report> {
+    let mut scan_range_tasks = FuturesUnordered::new();
+
+    // Push a pending future so that `.next()` will always return `Some`
+    scan_range_tasks.push(tokio::spawn(
+        std::future::pending::<Result<(), Report>>().boxed(),
+    ));
+
+    loop {
+        tokio::select! {
+            Some(scan_range_task) = scan_task_receiver.recv() => {
+                // TODO: Add a long timeout?
+                scan_range_tasks.push(scan_range_task.spawn(subscribed_keys_receiver.clone()));
+            }
+
+            Some(finished_task) = scan_range_tasks.next() => {
+                // Return early if there's an error
+                finished_task.expect("futures unordered with pending future should always return Some")?;
+            }
+        }
+    }
+}
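One subtlety in `scan_task_executor` above: `FuturesUnordered::next()` returns `None` once the set is empty, which would permanently disable that `select!` branch, so the executor seeds the set with a future that never completes. Here is a minimal, standalone sketch of that idiom (illustrative only, assuming the `futures` and `tokio` crates):

```rust
// Minimal, self-contained sketch of the pattern used above: seeding a
// `FuturesUnordered` with a never-ready future so that `.next()` always
// yields `Some` for the futures that do finish, instead of returning `None`
// once the set is drained.
use futures::{stream::FuturesUnordered, FutureExt, StreamExt};

#[tokio::main]
async fn main() {
    let mut tasks = FuturesUnordered::new();
    tasks.push(std::future::pending::<u32>().boxed());
    tasks.push(async { 42 }.boxed());

    // Yields `Some(42)`; without the pending future, a drained set would
    // yield `None`, which would disable the corresponding `select!` branch.
    assert_eq!(tasks.next().await, Some(42));
}
```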
diff --git a/zebra-scan/src/service/scan_task/scan.rs b/zebra-scan/src/service/scan_task/scan.rs
new file mode 100644
index 00000000000..898183fe135
--- /dev/null
+++ b/zebra-scan/src/service/scan_task/scan.rs
@@ -0,0 +1,568 @@
+//! The scanner task and scanning APIs.
+
+use std::{
+    collections::{BTreeMap, HashMap},
+    sync::Arc,
+    time::Duration,
+};
+
+use color_eyre::{eyre::eyre, Report};
+use itertools::Itertools;
+use tokio::{
+    sync::{mpsc::Sender, watch},
+    task::JoinHandle,
+};
+use tower::{buffer::Buffer, util::BoxService, Service, ServiceExt};
+
+use tracing::Instrument;
+use zcash_client_backend::{
+    data_api::ScannedBlock,
+    encoding::decode_extended_full_viewing_key,
+    proto::compact_formats::{
+        ChainMetadata, CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
+    },
+    scanning::{ScanError, ScanningKey},
+};
+use zcash_primitives::{
+    sapling::SaplingIvk,
+    zip32::{AccountId, DiversifiableFullViewingKey, Scope},
+};
+
+use zebra_chain::{
+    block::{Block, Height},
+    chain_tip::ChainTip,
+    diagnostic::task::WaitForPanics,
+    parameters::Network,
+    serialization::ZcashSerialize,
+    transaction::Transaction,
+};
+use zebra_node_services::scan_service::response::ScanResult;
+use zebra_state::{ChainTipChange, SaplingScannedResult, TransactionIndex};
+
+use crate::{
+    service::{ScanTask, ScanTaskCommand},
+    storage::{SaplingScanningKey, Storage},
+};
+
+use super::executor;
+
+mod scan_range;
+
+pub use scan_range::ScanRangeTaskBuilder;
+
+/// The generic state type used by the scanner.
+pub type State = Buffer<
+    BoxService<zebra_state::Request, zebra_state::Response, zebra_state::BoxError>,
+    zebra_state::Request,
+>;
+
+/// Wait a few seconds at startup for some blocks to get verified.
+///
+/// But the state might be empty if the network is slow.
+const INITIAL_WAIT: Duration = Duration::from_secs(15);
+
+/// The amount of time between checking for new blocks and starting new scans.
+///
+/// TODO: The current value is set to 10 so that tests don't sleep for too long and finish faster.
+/// Set it to 30 after #8250 gets addressed, or remove this const completely in the refactor.
+pub const CHECK_INTERVAL: Duration = Duration::from_secs(10);
+
+/// We log an info log with progress after this many blocks.
+const INFO_LOG_INTERVAL: u32 = 10_000;
+
+/// Start a scan task that reads blocks from `state`, scans them with the configured keys in
+/// `storage`, and then writes the results to `storage`.
+pub async fn start(
+    state: State,
+    chain_tip_change: ChainTipChange,
+    storage: Storage,
+    mut cmd_receiver: tokio::sync::mpsc::Receiver<ScanTaskCommand>,
+) -> Result<(), Report> {
+    let network = storage.network();
+    let sapling_activation_height = network.sapling_activation_height();
+
+    info!(?network, "starting scan task");
+
+    // Do not scan and notify if we are below sapling activation height.
+    #[cfg(not(test))]
+    wait_for_height(
+        sapling_activation_height,
+        "Sapling activation",
+        state.clone(),
+    )
+    .await?;
+
+    // Read keys from the storage on disk, which can block async execution.
+    let key_storage = storage.clone();
+    let key_heights = tokio::task::spawn_blocking(move || key_storage.sapling_keys_last_heights())
+        .wait_for_panics()
+        .await;
+    let key_heights = Arc::new(key_heights);
+
+    let mut height = get_min_height(&key_heights).unwrap_or(sapling_activation_height);
+
+    info!(start_height = ?height, "got min scan height");
+
+    // Parse and convert keys once, then use them to scan all blocks.
+    // There is some cryptography here, but it should be fast even with thousands of keys.
+    let mut parsed_keys: HashMap<
+        SaplingScanningKey,
+        (Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>),
+    > = key_heights
+        .keys()
+        .map(|key| {
+            let parsed_keys = sapling_key_to_scan_block_keys(key, network)?;
+            Ok::<_, Report>((key.clone(), parsed_keys))
+        })
+        .try_collect()?;
+
+    let mut subscribed_keys: HashMap<SaplingScanningKey, Sender<ScanResult>> = HashMap::new();
+
+    let (subscribed_keys_sender, subscribed_keys_receiver) =
+        tokio::sync::watch::channel(Arc::new(subscribed_keys.clone()));
+
+    let (scan_task_sender, scan_task_executor_handle) =
+        executor::spawn_init(subscribed_keys_receiver.clone());
+    let mut scan_task_executor_handle = Some(scan_task_executor_handle);
+
+    // Give empty states time to verify some blocks before we start scanning.
+    tokio::time::sleep(INITIAL_WAIT).await;
+
+    loop {
+        if let Some(handle) = scan_task_executor_handle {
+            if handle.is_finished() {
+                warn!("scan task finished unexpectedly");
+
+                handle.await?.map_err(|err| eyre!(err))?;
+                return Ok(());
+            } else {
+                scan_task_executor_handle = Some(handle);
+            }
+        }
+
+        let was_parsed_keys_empty = parsed_keys.is_empty();
+
+        let (new_keys, new_result_senders, new_result_receivers) =
+            ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+        subscribed_keys.extend(new_result_senders);
+        // Drop any results senders that are closed from subscribed_keys
+        subscribed_keys.retain(|key, sender| !sender.is_closed() && parsed_keys.contains_key(key));
+
+        // Send the latest version of `subscribed_keys` before spawning the scan range task
+        subscribed_keys_sender
+            .send(Arc::new(subscribed_keys.clone()))
+            .expect("last receiver should not be dropped while this task is running");
+
+        for (result_receiver, rsp_tx) in new_result_receivers {
+            // Ignore send errors, we drop any closed results channels above.
+            let _ = rsp_tx.send(result_receiver);
+        }
+
+        if !new_keys.is_empty() {
+            let state = state.clone();
+            let storage = storage.clone();
+
+            let start_height = new_keys
+                .iter()
+                .map(|(_, (_, _, height))| *height)
+                .min()
+                .unwrap_or(sapling_activation_height);
+
+            if was_parsed_keys_empty {
+                info!(?start_height, "setting new start height");
+                height = start_height;
+            }
+            // Skip spawning a ScanRange task if `start_height` is at or above the current height
+            else if start_height < height {
+                scan_task_sender
+                    .send(ScanRangeTaskBuilder::new(height, new_keys, state, storage))
+                    .await
+                    .expect("scan_until_task channel should not be closed");
+            }
+        }
+
+        if !parsed_keys.is_empty() {
+            let scanned_height = scan_height_and_store_results(
+                height,
+                state.clone(),
+                Some(chain_tip_change.clone()),
+                storage.clone(),
+                key_heights.clone(),
+                parsed_keys.clone(),
+                subscribed_keys_receiver.clone(),
+            )
+            .await?;
+
+            // If we've reached the tip, sleep for a while then try and get the same block.
+            if scanned_height.is_none() {
+                tokio::time::sleep(CHECK_INTERVAL).await;
+                continue;
+            }
+        } else {
+            tokio::time::sleep(CHECK_INTERVAL).await;
+            continue;
+        }
+
+        height = height
+            .next()
+            .expect("a valid blockchain never reaches the max height");
+    }
+}
+
+/// Polls the state service for the tip height every [`CHECK_INTERVAL`] until the tip reaches the provided `height`
+pub async fn wait_for_height(
+    height: Height,
+    height_name: &'static str,
+    state: State,
+) -> Result<(), Report> {
+    loop {
+        let tip_height = tip_height(state.clone()).await?;
+        if tip_height < height {
+            info!(
+                "scanner is waiting for {height_name}. Current tip: {}, {height_name}: {}",
+                tip_height.0, height.0
+            );
+
+            tokio::time::sleep(CHECK_INTERVAL).await;
+        } else {
+            info!(
+                "scanner finished waiting for {height_name}. Current tip: {}, {height_name}: {}",
+                tip_height.0, height.0
+            );
+
+            break;
+        }
+    }
+
+    Ok(())
+}
+
+/// Get the block at `height` from `state`, scan it with the keys in `parsed_keys`, and store the
+/// results in `storage`. If `height` is at or below the last scanned height for a key, skip that key.
+///
+/// Returns:
+/// - `Ok(Some(height))` if the height was scanned,
+/// - `Ok(None)` if the height was not in the state, and
+/// - `Err(error)` on fatal errors.
+pub async fn scan_height_and_store_results(
+    height: Height,
+    mut state: State,
+    chain_tip_change: Option<ChainTipChange>,
+    storage: Storage,
+    key_last_scanned_heights: Arc<HashMap<SaplingScanningKey, Height>>,
+    parsed_keys: HashMap<SaplingScanningKey, (Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>)>,
+    subscribed_keys_receiver: watch::Receiver<Arc<HashMap<SaplingScanningKey, Sender<ScanResult>>>>,
+) -> Result<Option<Height>, Report> {
+    let network = storage.network();
+
+    // Only log at info level every 10,000 blocks.
+    //
+    // TODO: also log progress every 5 minutes once we reach the tip?
+    let is_info_log = height.0 % INFO_LOG_INTERVAL == 0;
+
+    // Get a block from the state.
+    // We can't use ServiceExt::oneshot() here, because it causes lifetime errors in init().
+    let block = state
+        .ready()
+        .await
+        .map_err(|e| eyre!(e))?
+        .call(zebra_state::Request::Block(height.into()))
+        .await
+        .map_err(|e| eyre!(e))?;
+
+    let block = match block {
+        zebra_state::Response::Block(Some(block)) => block,
+        zebra_state::Response::Block(None) => return Ok(None),
+        _ => unreachable!("unmatched response to a state::Block request"),
+    };
+
+    for (key_index_in_task, (sapling_key, (dfvks, ivks))) in parsed_keys.into_iter().enumerate() {
+        match key_last_scanned_heights.get(&sapling_key) {
+            // Only scan what was not scanned for each key
+            Some(last_scanned_height) if height <= *last_scanned_height => continue,
+
+            Some(last_scanned_height) if is_info_log => {
+                if let Some(chain_tip_change) = &chain_tip_change {
+                    // # Security
+                    //
+                    // We can't log `sapling_key` here because it is a private viewing key. Anyone who reads
+                    // the logs could use the key to view those transactions.
+                    info!(
+                        "Scanning the blockchain for key {}, started at block {:?}, now at block {:?}, current tip {:?}",
+                        key_index_in_task, last_scanned_height.next().expect("height is not maximum").as_usize(),
+                        height.as_usize(),
+                        chain_tip_change.latest_chain_tip().best_tip_height().expect("we should have a tip to scan").as_usize(),
+                    );
+                } else {
+                    info!(
+                        "Scanning the blockchain for key {}, started at block {:?}, now at block {:?}",
+                        key_index_in_task, last_scanned_height.next().expect("height is not maximum").as_usize(),
+                        height.as_usize(),
+                    );
+                }
+            }
+
+            _other => {}
+        };
+
+        let subscribed_keys_receiver = subscribed_keys_receiver.clone();
+
+        let sapling_key = sapling_key.clone();
+        let block = block.clone();
+        let mut storage = storage.clone();
+
+        // We use a dummy size for the Sapling note commitment tree.
+        //
+        // We can't set the size to zero, because the underlying scanning function would return
+        // `zcash_client_backend::scanning::ScanError::TreeSizeUnknown`.
+        //
+        // And we can't set it close to 0, because the scanner subtracts the number of notes
+        // in the block, and panics with "attempt to subtract with overflow". The number of
+        // notes in a block must be less than this value; this is a consensus rule.
+        //
+        // TODO: use the real sapling tree size: `zs::Response::SaplingTree().position() + 1`
+        let sapling_tree_size = 1 << 16;
+
+        tokio::task::spawn_blocking(move || {
+            let dfvk_res =
+                scan_block(network, &block, sapling_tree_size, &dfvks).map_err(|e| eyre!(e))?;
+            let ivk_res =
+                scan_block(network, &block, sapling_tree_size, &ivks).map_err(|e| eyre!(e))?;
+
+            let dfvk_res = scanned_block_to_db_result(dfvk_res);
+            let ivk_res = scanned_block_to_db_result(ivk_res);
+
+            let latest_subscribed_keys = subscribed_keys_receiver.borrow().clone();
+            if let Some(results_sender) = latest_subscribed_keys.get(&sapling_key).cloned() {
+                let results = dfvk_res.iter().chain(ivk_res.iter());
+
+                for (_tx_index, &tx_id) in results {
+                    // TODO: Handle `SendError`s by dropping the sender from `subscribed_keys`
+                    let _ = results_sender.try_send(ScanResult {
+                        key: sapling_key.clone(),
+                        height,
+                        tx_id: tx_id.into(),
+                    });
+                }
+            }
+
+            storage.add_sapling_results(&sapling_key, height, dfvk_res);
+            storage.add_sapling_results(&sapling_key, height, ivk_res);
+
+            Ok::<_, Report>(())
+        })
+        .wait_for_panics()
+        .await?;
+    }
+
+    Ok(Some(height))
+}
+
+/// Returns the transactions from `block` belonging to the given `scanning_keys`.
+/// This list of keys should come from a single configured `SaplingScanningKey`.
+///
+/// For example, there are two individual viewing keys for most shielded transfers:
+/// - the payment (external) key, and
+/// - the change (internal) key.
+///
+/// # Performance / Hangs
+///
+/// This method can block while reading database files, so it must be inside spawn_blocking()
+/// in async code.
+///
+/// TODO:
+/// - Pass the real `sapling_tree_size` parameter from the state.
+/// - Add other prior block metadata.
+pub fn scan_block<K: ScanningKey>(
+    network: Network,
+    block: &Block,
+    sapling_tree_size: u32,
+    scanning_keys: &[K],
+) -> Result<ScannedBlock<K::Nf>, ScanError> {
+    // TODO: Implement a check that returns early when the block height is below the Sapling
+    // activation height.
+
+    let network: zcash_primitives::consensus::Network = network.into();
+
+    let chain_metadata = ChainMetadata {
+        sapling_commitment_tree_size: sapling_tree_size,
+        // Orchard is not supported at the moment, so the tree size can be 0.
+        orchard_commitment_tree_size: 0,
+    };
+
+    // Use a dummy `AccountId` as we don't use accounts yet.
+    let dummy_account = AccountId::from(0);
+    let scanning_keys: Vec<_> = scanning_keys
+        .iter()
+        .map(|key| (&dummy_account, key))
+        .collect();
+
+    zcash_client_backend::scanning::scan_block(
+        &network,
+        block_to_compact(block, chain_metadata),
+        scanning_keys.as_slice(),
+        // Ignore whether notes are change from a viewer's own spends for now.
+        &[],
+        // Ignore previous blocks for now.
+        None,
+    )
+}
+
+/// Converts a Zebra-format scanning key into some `scan_block()` keys.
+///
+/// Currently only accepts extended full viewing keys, and returns both their diversifiable full
+/// viewing key and their individual viewing key, for testing purposes.
+///
+// TODO: work out what string format is used for SaplingIvk, if any, and support it here
+// performance: stop returning both the dfvk and ivk for the same key
+// TODO: use `ViewingKey::parse` from zebra-chain instead
+pub fn sapling_key_to_scan_block_keys(
+    key: &SaplingScanningKey,
+    network: Network,
+) -> Result<(Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>), Report> {
+    let efvk =
+        decode_extended_full_viewing_key(network.sapling_efvk_hrp(), key).map_err(|e| eyre!(e))?;
+
+    // Just return all the keys for now, so we can be sure our code supports them.
+    let dfvk = efvk.to_diversifiable_full_viewing_key();
+    let eivk = dfvk.to_ivk(Scope::External);
+    let iivk = dfvk.to_ivk(Scope::Internal);
+
+    Ok((vec![dfvk], vec![eivk, iivk]))
+}
+
+/// Converts a Zebra block and its metadata into a compact block.
+pub fn block_to_compact(block: &Block, chain_metadata: ChainMetadata) -> CompactBlock {
+    CompactBlock {
+        height: block
+            .coinbase_height()
+            .expect("verified block should have a valid height")
+            .0
+            .into(),
+        // TODO: performance: look up the block hash from the state rather than recalculating it
+        hash: block.hash().bytes_in_display_order().to_vec(),
+        prev_hash: block
+            .header
+            .previous_block_hash
+            .bytes_in_display_order()
+            .to_vec(),
+        time: block
+            .header
+            .time
+            .timestamp()
+            .try_into()
+            .expect("unsigned 32-bit times should work until 2105"),
+        header: block
+            .header
+            .zcash_serialize_to_vec()
+            .expect("verified block should serialize"),
+        vtx: block
+            .transactions
+            .iter()
+            .cloned()
+            .enumerate()
+            .map(transaction_to_compact)
+            .collect(),
+        chain_metadata: Some(chain_metadata),
+
+        // The protocol version is used for the gRPC wire format, so it isn't needed here.
+        proto_version: 0,
+    }
+}
+/// Converts a Zebra transaction into a compact transaction.
+fn transaction_to_compact((index, tx): (usize, Arc<Transaction>)) -> CompactTx {
+    CompactTx {
+        index: index
+            .try_into()
+            .expect("tx index in block should fit in u64"),
+        // TODO: performance: look up the tx hash from the state rather than recalculating it
+        hash: tx.hash().bytes_in_display_order().to_vec(),
+
+        // `fee` is not checked by the `scan_block` function. It is allowed to be unset.
+        //
+        fee: 0,
+
+        spends: tx
+            .sapling_nullifiers()
+            .map(|nf| CompactSaplingSpend {
+                nf: <[u8; 32]>::from(*nf).to_vec(),
+            })
+            .collect(),
+
+        // > output encodes the cmu field, ephemeralKey field, and a 52-byte prefix of the encCiphertext field of a Sapling Output
+        //
+        //
+        outputs: tx
+            .sapling_outputs()
+            .map(|output| CompactSaplingOutput {
+                cmu: output.cm_u.to_bytes().to_vec(),
+                ephemeral_key: output
+                    .ephemeral_key
+                    .zcash_serialize_to_vec()
+                    .expect("verified output should serialize successfully"),
+                ciphertext: output
+                    .enc_ciphertext
+                    .zcash_serialize_to_vec()
+                    .expect("verified output should serialize successfully")
+                    .into_iter()
+                    .take(52)
+                    .collect(),
+            })
+            .collect(),
+
+        // `actions` is not checked by the `scan_block` function.
+        actions: vec![],
+    }
+}
+
+/// Convert a scanned block to a list of scanner database results.
+fn scanned_block_to_db_result<Nf>(
+    scanned_block: ScannedBlock<Nf>,
+) -> BTreeMap<TransactionIndex, SaplingScannedResult> {
+    scanned_block
+        .transactions()
+        .iter()
+        .map(|tx| {
+            (
+                TransactionIndex::from_usize(tx.index),
+                SaplingScannedResult::from_bytes_in_display_order(*tx.txid.as_ref()),
+            )
+        })
+        .collect()
+}
+
+/// Get the minimum height available in a `key_heights` map.
+fn get_min_height(map: &HashMap<SaplingScanningKey, Height>) -> Option<Height> {
+    map.values().cloned().min()
+}
+
+/// Get the tip height, or return the genesis block height if no tip is available.
+async fn tip_height(mut state: State) -> Result<Height, Report> {
+    let tip = state
+        .ready()
+        .await
+        .map_err(|e| eyre!(e))?
+        .call(zebra_state::Request::Tip)
+        .await
+        .map_err(|e| eyre!(e))?;
+
+    match tip {
+        zebra_state::Response::Tip(Some((height, _hash))) => Ok(height),
+        zebra_state::Response::Tip(None) => Ok(Height(0)),
+        _ => unreachable!("unmatched response to a state::Tip request"),
+    }
+}
+
+/// Initialize the scanner based on its config, and spawn a task for it.
+///
+/// TODO: add a test for this function.
+pub fn spawn_init(
+    storage: Storage,
+    state: State,
+    chain_tip_change: ChainTipChange,
+    cmd_receiver: tokio::sync::mpsc::Receiver<ScanTaskCommand>,
+) -> JoinHandle<Result<(), Report>> {
+    tokio::spawn(start(state, chain_tip_change, storage, cmd_receiver).in_current_span())
+}
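To make the pieces of `scan.rs` concrete, here is a hedged sketch (an editorial aside, not part of this diff) of scanning one block with one configured key, using only the functions defined in the file above. It assumes `block` is a verified block for `network`; the `txid` and `index` field names come from the same `zcash_client_backend` `WalletTx` type that `scanned_block_to_db_result` reads.

```rust
// Illustrative sketch only (not part of this PR).
fn scan_one_block(
    key: &SaplingScanningKey,
    block: &Block,
    network: Network,
) -> Result<(), Report> {
    // Parse the configured key into scanning keys once.
    let (dfvks, ivks) = sapling_key_to_scan_block_keys(key, network)?;

    // The same dummy tree size used by `scan_height_and_store_results` above.
    let sapling_tree_size = 1 << 16;

    // Scan with the diversifiable full viewing keys...
    let scanned = scan_block(network, block, sapling_tree_size, &dfvks).map_err(|e| eyre!(e))?;
    for tx in scanned.transactions() {
        println!("found shielded tx {:?} at index {}", tx.txid, tx.index);
    }

    // ...and the individual viewing keys are scanned the same way.
    let _ivk_results = scan_block(network, block, sapling_tree_size, &ivks).map_err(|e| eyre!(e))?;

    Ok(())
}
```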
diff --git a/zebra-scan/src/service/scan_task/scan/scan_range.rs b/zebra-scan/src/service/scan_task/scan/scan_range.rs
new file mode 100644
index 00000000000..b4b75cfe560
--- /dev/null
+++ b/zebra-scan/src/service/scan_task/scan/scan_range.rs
@@ -0,0 +1,148 @@
+//! Functions for registering new keys in the scan task
+
+use std::{collections::HashMap, sync::Arc};
+
+use crate::{
+    scan::{get_min_height, scan_height_and_store_results, wait_for_height, State, CHECK_INTERVAL},
+    storage::Storage,
+};
+use color_eyre::eyre::Report;
+use tokio::{
+    sync::{mpsc::Sender, watch},
+    task::JoinHandle,
+};
+use tracing::Instrument;
+use zcash_primitives::{sapling::SaplingIvk, zip32::DiversifiableFullViewingKey};
+use zebra_chain::block::Height;
+use zebra_node_services::scan_service::response::ScanResult;
+use zebra_state::SaplingScanningKey;
+
+/// A builder for a scan until task
+pub struct ScanRangeTaskBuilder {
+    /// The range of block heights that should be scanned for these keys
+    // TODO: Remove start heights from keys and require that all keys per task use the same start height
+    height_range: std::ops::Range<Height>,
+
+    /// The keys to be used for scanning blocks in this task
+    keys: HashMap<SaplingScanningKey, (Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>, Height)>,
+
+    /// A handle to the state service for reading the blocks and the chain tip height
+    state: State,
+
+    /// A handle to the zebra-scan database for storing results
+    storage: Storage,
+}
+
+impl ScanRangeTaskBuilder {
+    /// Creates a new [`ScanRangeTaskBuilder`]
+    pub fn new(
+        stop_height: Height,
+        keys: HashMap<
+            SaplingScanningKey,
+            (Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>, Height),
+        >,
+        state: State,
+        storage: Storage,
+    ) -> Self {
+        Self {
+            height_range: Height::MIN..stop_height,
+            keys,
+            state,
+            storage,
+        }
+    }
+
+    /// Spawns a `scan_range()` task and returns its [`JoinHandle`]
+    // TODO: return a tuple with a shutdown sender
+    pub fn spawn(
+        self,
+        subscribed_keys_receiver: watch::Receiver<
+            Arc<HashMap<SaplingScanningKey, Sender<ScanResult>>>,
+        >,
+    ) -> JoinHandle<Result<(), Report>> {
+        let Self {
+            height_range,
+            keys,
+            state,
+            storage,
+        } = self;
+
+        tokio::spawn(
+            scan_range(
+                height_range.end,
+                keys,
+                state,
+                storage,
+                subscribed_keys_receiver,
+            )
+            .in_current_span(),
+        )
+    }
+}
+/// Start a scan task that reads blocks from `state` within the provided height range,
+/// scans them with the configured keys in `storage`, and then writes the results to `storage`.
+// TODO: update the first parameter to `std::ops::Range<Height>`
+pub async fn scan_range(
+    stop_before_height: Height,
+    keys: HashMap<SaplingScanningKey, (Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>, Height)>,
+    state: State,
+    storage: Storage,
+    subscribed_keys_receiver: watch::Receiver<Arc<HashMap<SaplingScanningKey, Sender<ScanResult>>>>,
+) -> Result<(), Report> {
+    let sapling_activation_height = storage.network().sapling_activation_height();
+    // Do not scan and notify if we are below sapling activation height.
+    wait_for_height(
+        sapling_activation_height,
+        "Sapling activation",
+        state.clone(),
+    )
+    .await?;
+
+    let key_heights: HashMap<SaplingScanningKey, Height> = keys
+        .iter()
+        .map(|(key, (_, _, height))| (key.clone(), *height))
+        .collect();
+
+    let mut height = get_min_height(&key_heights).unwrap_or(sapling_activation_height);
+
+    let key_heights = Arc::new(key_heights);
+
+    // Parse and convert keys once, then use them to scan all blocks.
+    let parsed_keys: HashMap<
+        SaplingScanningKey,
+        (Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>),
+    > = keys
+        .into_iter()
+        .map(|(key, (decoded_dfvks, decoded_ivks, _h))| (key, (decoded_dfvks, decoded_ivks)))
+        .collect();
+
+    while height < stop_before_height {
+        let scanned_height = scan_height_and_store_results(
+            height,
+            state.clone(),
+            None,
+            storage.clone(),
+            key_heights.clone(),
+            parsed_keys.clone(),
+            subscribed_keys_receiver.clone(),
+        )
+        .await?;
+
+        // If we've reached the tip, sleep for a while then try and get the same block.
+        if scanned_height.is_none() {
+            tokio::time::sleep(CHECK_INTERVAL).await;
+            continue;
+        }
+
+        height = height
+            .next()
+            .expect("a valid blockchain never reaches the max height");
+    }
+
+    info!(
+        start_height = ?height,
+        ?stop_before_height,
+        "finished scanning range"
+    );
+
+    Ok(())
+}
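As a usage note, a `ScanRangeTaskBuilder` is normally queued on the executor channel returned by `executor::spawn_init`. The sketch below is illustrative only (not part of this diff); it assumes `scan_task_sender`, `state`, and `storage` were created as in `scan.rs` above, and that the keys were already parsed with their birth heights.

```rust
// Illustrative sketch only (not part of this PR): queue a one-off scan of all
// blocks below `stop_height` for some already-parsed keys.
async fn queue_scan_range(
    scan_task_sender: tokio::sync::mpsc::Sender<ScanRangeTaskBuilder>,
    stop_height: Height,
    keys: HashMap<SaplingScanningKey, (Vec<DiversifiableFullViewingKey>, Vec<SaplingIvk>, Height)>,
    state: State,
    storage: Storage,
) {
    scan_task_sender
        .send(ScanRangeTaskBuilder::new(stop_height, keys, state, storage))
        .await
        // Mirrors the expect used by the caller in `scan.rs` above.
        .expect("scan task executor channel should not be closed");
}
```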
diff --git a/zebra-scan/src/service/scan_task/tests.rs b/zebra-scan/src/service/scan_task/tests.rs
new file mode 100644
index 00000000000..1fadcf13677
--- /dev/null
+++ b/zebra-scan/src/service/scan_task/tests.rs
@@ -0,0 +1,23 @@
+//! Tests for the scan task.
+
+use std::sync::Arc;
+
+use super::{ScanTask, ScanTaskCommand, SCAN_TASK_BUFFER_SIZE};
+
+#[cfg(test)]
+mod vectors;
+
+impl ScanTask {
+    /// Spawns a new [`ScanTask`] for tests.
+    pub fn mock() -> (Self, tokio::sync::mpsc::Receiver<ScanTaskCommand>) {
+        let (cmd_sender, cmd_receiver) = tokio::sync::mpsc::channel(SCAN_TASK_BUFFER_SIZE);
+
+        (
+            Self {
+                handle: Arc::new(tokio::spawn(std::future::pending())),
+                cmd_sender,
+            },
+            cmd_receiver,
+        )
+    }
+}
diff --git a/zebra-scan/src/service/scan_task/tests/vectors.rs b/zebra-scan/src/service/scan_task/tests/vectors.rs
new file mode 100644
index 00000000000..9022da7948c
--- /dev/null
+++ b/zebra-scan/src/service/scan_task/tests/vectors.rs
@@ -0,0 +1,205 @@
+//! Fixed test vectors for the scan task.
+
+use std::{
+    collections::{HashMap, HashSet},
+    time::Duration,
+};
+
+use color_eyre::Report;
+
+use zebra_chain::{block::Height, transaction};
+use zebra_node_services::scan_service::response::ScanResult;
+
+use crate::{service::ScanTask, tests::mock_sapling_scanning_keys};
+
+/// Test that [`ScanTask::process_messages`] adds and removes keys as expected for the
+/// `RegisterKeys` and `RemoveKeys` commands
+#[tokio::test]
+async fn scan_task_processes_messages_correctly() -> Result<(), Report> {
+    let (mut mock_scan_task, mut cmd_receiver) = ScanTask::mock();
+    let mut parsed_keys = HashMap::new();
+    let network = Default::default();
+
+    // Send some keys to be registered
+    let num_keys = 10;
+    let sapling_keys =
+        mock_sapling_scanning_keys(num_keys.try_into().expect("should fit in u8"), network);
+    let sapling_keys_with_birth_heights: Vec<(String, Option<u32>)> =
+        sapling_keys.into_iter().zip((0..).map(Some)).collect();
+    mock_scan_task.register_keys(sapling_keys_with_birth_heights.clone())?;
+
+    let (new_keys, _new_results_senders, _new_results_receivers) =
+        ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+    // Check that it updated parsed_keys correctly and returned the right new keys when starting
+    // with an empty state
+
+    assert_eq!(
+        new_keys.len(),
+        num_keys,
+        "should add all received keys to new keys"
+    );
+
+    assert_eq!(
+        parsed_keys.len(),
+        num_keys,
+        "should add all received keys to parsed keys"
+    );
+
+    mock_scan_task.register_keys(sapling_keys_with_birth_heights.clone())?;
+
+    // Check that no keys are added if they are all already known and the heights are the same
+
+    let (new_keys, _new_results_senders, _new_results_receivers) =
+        ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+    assert_eq!(
+        parsed_keys.len(),
+        num_keys,
+        "should not add existing keys to parsed keys"
+    );
+
+    assert!(
+        new_keys.is_empty(),
+        "should not return known keys as new keys"
+    );
+
+    // Check that keys can't be overridden.
+
+    let sapling_keys = mock_sapling_scanning_keys(20, network);
+    let sapling_keys_with_birth_heights: Vec<(String, Option<u32>)> = sapling_keys
+        .clone()
+        .into_iter()
+        .map(|key| (key, Some(0)))
+        .collect();
+
+    mock_scan_task.register_keys(sapling_keys_with_birth_heights[10..20].to_vec())?;
+    mock_scan_task.register_keys(sapling_keys_with_birth_heights[10..15].to_vec())?;
+
+    let (new_keys, _new_results_senders, _new_results_receivers) =
+        ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+    assert_eq!(
+        parsed_keys.len(),
+        20,
+        "should not add existing keys to parsed keys"
+    );
+
+    assert_eq!(
+        new_keys.len(),
+        10,
+        "should add 10 of the received keys to new keys"
+    );
+
+    // Check that it removes keys correctly
+
+    let sapling_keys = mock_sapling_scanning_keys(30, network);
+    let done_rx = mock_scan_task.remove_keys(&sapling_keys)?;
+
+    let (new_keys, _new_results_senders, _new_results_receivers) =
+        ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+    // Check that it sends the done notification successfully before returning and dropping `done_tx`
+    done_rx.await?;
+
+    assert!(
+        parsed_keys.is_empty(),
+        "all parsed keys should have been removed"
+    );
+
+    assert!(new_keys.is_empty(), "there should be no new keys");
+
+    // Check that it doesn't return removed keys as new keys when processing a batch of messages
+
+    mock_scan_task.register_keys(sapling_keys_with_birth_heights.clone())?;
+
+    mock_scan_task.remove_keys(&sapling_keys)?;
+
+    let (new_keys, _new_results_senders, _new_results_receivers) =
+        ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+    assert!(
+        new_keys.is_empty(),
+        "all registered keys should be removed before process_messages returns"
+    );
+
+    // Check that it does return registered keys if they were removed in a prior message when
+    // processing a batch of messages
+
+    mock_scan_task.register_keys(sapling_keys_with_birth_heights.clone())?;
+
+    mock_scan_task.remove_keys(&sapling_keys)?;
+
+    mock_scan_task.register_keys(sapling_keys_with_birth_heights[..2].to_vec())?;
+
+    let (new_keys, _new_results_senders, _new_results_receivers) =
+        ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+    assert_eq!(
+        new_keys.len(),
+        2,
+        "should return 2 keys as new_keys after removals"
+    );
+
+    assert_eq!(
+        parsed_keys.len(),
+        2,
+        "should add 2 keys to parsed_keys after removals"
+    );
+
+    let subscribe_keys: HashSet<String> = sapling_keys[..5].iter().cloned().collect();
+    let result_receiver_fut = {
+        let mut mock_scan_task = mock_scan_task.clone();
+        tokio::spawn(async move { mock_scan_task.subscribe(subscribe_keys.clone()).await })
+    };
+
+    // Wait for the spawned task to send the subscribe message
+    tokio::time::sleep(Duration::from_secs(1)).await;
+
+    let (_new_keys, new_results_senders, new_results_receivers) =
+        ScanTask::process_messages(&mut cmd_receiver, &mut parsed_keys, network)?;
+
+    let (result_receiver, rsp_tx) = new_results_receivers
+        .into_iter()
+        .next()
+        .expect("there should be a new result receiver");
+
+    rsp_tx
+        .send(result_receiver)
+        .expect("should send response successfully");
+
+    let mut result_receiver = result_receiver_fut
+        .await
+        .expect("tokio task should join successfully")
+        .expect("should send and receive message successfully");
+
+    let processed_subscribe_keys: HashSet<String> = new_results_senders.keys().cloned().collect();
+    let expected_new_subscribe_keys: HashSet<String> = sapling_keys[..2].iter().cloned().collect();
+
+    assert_eq!(
+        processed_subscribe_keys, expected_new_subscribe_keys,
+        "should return new result senders for registered keys"
+    );
+
+    for sender in new_results_senders.values() {
+        // Send a fake tx id for each key
+        sender
+            .send(ScanResult {
+                key: String::new(),
+                height: Height::MIN,
+                tx_id: transaction::Hash([0; 32]),
+            })
+            .await?;
+    }
+
+    let mut num_results = 0;
+
+    while result_receiver.try_recv().is_ok() {
+        num_results += 1;
+    }
+
+    assert_eq!(
+        num_results,
+        expected_new_subscribe_keys.len(),
+        "there should be a fake result sent for each subscribed key"
+    );
+
+    Ok(())
+}
diff --git a/zebra-scan/src/service/tests.rs b/zebra-scan/src/service/tests.rs
new file mode 100644
index 00000000000..0ae17283083
--- /dev/null
+++ b/zebra-scan/src/service/tests.rs
@@ -0,0 +1,333 @@
+//! Tests for ScanService.
+
+use tokio::sync::mpsc::error::TryRecvError;
+use tower::{Service, ServiceBuilder, ServiceExt};
+
+use color_eyre::{eyre::eyre, Result};
+
+use zebra_chain::{block::Height, parameters::Network};
+use zebra_node_services::scan_service::{request::Request, response::Response};
+use zebra_state::TransactionIndex;
+
+use crate::{
+    service::{scan_task::ScanTaskCommand, ScanService},
+    storage::db::tests::{fake_sapling_results, new_test_storage},
+    tests::{mock_sapling_scanning_keys, ZECPAGES_SAPLING_VIEWING_KEY},
+    Config,
+};
+
+/// Tests that keys are deleted correctly
+#[tokio::test]
+pub async fn scan_service_deletes_keys_correctly() -> Result<()> {
+    let mut db = new_test_storage(Network::Mainnet);
+
+    let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+
+    for fake_result_height in [Height::MIN, Height(1), Height::MAX] {
+        db.insert_sapling_results(
+            &zec_pages_sapling_efvk,
+            fake_result_height,
+            fake_sapling_results([
+                TransactionIndex::MIN,
+                TransactionIndex::from_index(40),
+                TransactionIndex::MAX,
+            ]),
+        );
+    }
+
+    assert!(
+        !db.sapling_results(&zec_pages_sapling_efvk).is_empty(),
+        "there should be some results for this key in the db"
+    );
+
+    let (mut scan_service, mut cmd_receiver) = ScanService::new_with_mock_scanner(db);
+
+    let response_fut = scan_service
+        .ready()
+        .await
+        .map_err(|err| eyre!(err))?
+        .call(Request::DeleteKeys(vec![zec_pages_sapling_efvk.clone()]));
+
+    let expected_keys = vec![zec_pages_sapling_efvk.clone()];
+    let cmd_handler_fut = tokio::spawn(async move {
+        let Some(ScanTaskCommand::RemoveKeys { done_tx, keys }) = cmd_receiver.recv().await else {
+            panic!("should successfully receive RemoveKeys message");
+        };
+
+        assert_eq!(keys, expected_keys, "keys should match the request keys");
+
+        done_tx.send(()).expect("send should succeed");
+    });
+
+    // Poll futures
+    let (response, join_result) = tokio::join!(response_fut, cmd_handler_fut);
+    join_result?;
+
+    match response.map_err(|err| eyre!(err))? {
+        Response::DeletedKeys => {}
+        _ => panic!("scan service returned unexpected response variant"),
+    };
+
+    assert!(
+        scan_service
+            .db
+            .sapling_results(&zec_pages_sapling_efvk)
+            .is_empty(),
+        "all results for this key should have been deleted"
+    );
+
+    Ok(())
+}
+
+/// Tests that the scan service subscribes to results correctly
+#[tokio::test]
+pub async fn scan_service_subscribes_to_results_correctly() -> Result<()> {
+    let db = new_test_storage(Network::Mainnet);
+
+    let (mut scan_service, mut cmd_receiver) = ScanService::new_with_mock_scanner(db);
+
+    let keys = [String::from("fake key")];
+
+    let response_fut = scan_service
+        .ready()
+        .await
+        .map_err(|err| eyre!(err))?
+        .call(Request::SubscribeResults(keys.iter().cloned().collect()));
+
+    let expected_keys = keys.iter().cloned().collect();
+    let cmd_handler_fut = tokio::spawn(async move {
+        let Some(ScanTaskCommand::SubscribeResults { rsp_tx, keys }) = cmd_receiver.recv().await
+        else {
+            panic!("should successfully receive SubscribeResults message");
+        };
+
+        let (_results_sender, results_receiver) = tokio::sync::mpsc::channel(1);
+        rsp_tx
+            .send(results_receiver)
+            .expect("should send response successfully");
+        assert_eq!(keys, expected_keys, "keys should match the request keys");
+    });
+
+    // Poll futures
+    let (response, join_result) = tokio::join!(response_fut, cmd_handler_fut);
+    join_result?;
+
+    let mut results_receiver = match response.map_err(|err| eyre!(err))? {
+        Response::SubscribeResults(results_receiver) => results_receiver,
+        _ => panic!("scan service returned unexpected response variant"),
+    };
+
+    assert_eq!(
+        results_receiver.try_recv(),
+        Err(TryRecvError::Disconnected),
+        "channel with no items and dropped sender should be closed"
+    );
+
+    Ok(())
+}
+
+/// Tests that results are cleared correctly
+#[tokio::test]
+pub async fn scan_service_clears_results_correctly() -> Result<()> {
+    let mut db = new_test_storage(Network::Mainnet);
+
+    let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+
+    for fake_result_height in [Height::MIN, Height(1), Height::MAX] {
+        db.insert_sapling_results(
+            &zec_pages_sapling_efvk,
+            fake_result_height,
+            fake_sapling_results([
+                TransactionIndex::MIN,
+                TransactionIndex::from_index(40),
+                TransactionIndex::MAX,
+            ]),
+        );
+    }
+
+    assert!(
+        !db.sapling_results(&zec_pages_sapling_efvk).is_empty(),
+        "there should be some results for this key in the db"
+    );
+
+    let (mut scan_service, _cmd_receiver) = ScanService::new_with_mock_scanner(db.clone());
+
+    let response = scan_service
+        .ready()
+        .await
+        .map_err(|err| eyre!(err))?
+        .call(Request::ClearResults(vec![zec_pages_sapling_efvk.clone()]))
+        .await
+        .map_err(|err| eyre!(err))?;
+
+    match response {
+        Response::ClearedResults => {}
+        _ => panic!("scan service returned unexpected response variant"),
+    };
+
+    assert_eq!(
+        db.sapling_results(&zec_pages_sapling_efvk).len(),
+        1,
+        "all results for this key should have been deleted, one empty entry should remain"
+    );
+
+    for (_, result) in db.sapling_results(&zec_pages_sapling_efvk) {
+        assert!(
+            result.is_empty(),
+            "there should be no results for this entry in the db"
+        );
+    }
+
+    Ok(())
+}
+
+/// Tests that results for a key are returned correctly
+#[tokio::test]
+pub async fn scan_service_get_results_for_key_correctly() -> Result<()> {
+    let mut db = new_test_storage(Network::Mainnet);
+
+    let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+
+    for fake_result_height in [Height::MIN, Height(1), Height::MAX] {
+        db.insert_sapling_results(
+            &zec_pages_sapling_efvk,
+            fake_result_height,
+            fake_sapling_results([
+                TransactionIndex::MIN,
+                TransactionIndex::from_index(40),
+                TransactionIndex::MAX,
+            ]),
+        );
+    }
+
+    assert!(
+        db.sapling_results(&zec_pages_sapling_efvk).len() == 3,
+        "there should be 3 heights for this key in the db"
+    );
+
+    for (_height, transactions) in db.sapling_results(&zec_pages_sapling_efvk) {
+        assert!(
+            transactions.len() == 3,
+            "there should be 3 transactions for each height for this key in the db"
+        );
+    }
+
+    // We don't need to send any command to the scanner for this call.
+ let (mut scan_service, _cmd_receiver) = ScanService::new_with_mock_scanner(db); + + let response_fut = scan_service + .ready() + .await + .map_err(|err| eyre!(err))? + .call(Request::Results(vec![zec_pages_sapling_efvk.clone()])); + + match response_fut.await.map_err(|err| eyre!(err))? { + Response::Results(results) => { + assert!( + results.contains_key(&zec_pages_sapling_efvk), + "results should contain the requested key" + ); + assert!(results.len() == 1, "values are only for 1 key"); + + assert!( + results + .get_key_value(&zec_pages_sapling_efvk) + .unwrap() + .1 + .len() + == 3, + "we should have 3 heights for the given key " + ); + + for transactions in results + .get_key_value(&zec_pages_sapling_efvk) + .unwrap() + .1 + .values() + { + assert!( + transactions.len() == 3, + "there should be 3 transactions for each height for this key" + ); + } + } + _ => panic!("scan service returned unexpected response variant"), + }; + + Ok(()) +} + +/// Tests that the scan service registers keys correctly. +#[tokio::test] +pub async fn scan_service_registers_keys_correctly() -> Result<()> { + for network in Network::iter() { + scan_service_registers_keys_correctly_for(network).await?; + } + + Ok(()) +} + +async fn scan_service_registers_keys_correctly_for(network: Network) -> Result<()> { + // Mock the state. + let (state, _, _, chain_tip_change) = zebra_state::populated_state(vec![], network).await; + + // Instantiate the scan service. + let mut scan_service = ServiceBuilder::new() + .buffer(2) + .service(ScanService::new(&Config::ephemeral(), network, state, chain_tip_change).await); + + // Mock three Sapling keys. + let mocked_keys = mock_sapling_scanning_keys(3, network); + + // Add birth heights to the mocked keys. + let keys_to_register: Vec<_> = mocked_keys + .clone() + .into_iter() + .zip((0u32..).map(Some)) + .collect(); + + // Register the first key. + match scan_service + .ready() + .await + .map_err(|err| eyre!(err))? + .call(Request::RegisterKeys(keys_to_register[..1].to_vec())) + .await + .map_err(|err| eyre!(err))? + { + Response::RegisteredKeys(registered_keys) => { + // The key should be registered. + assert_eq!( + registered_keys, + mocked_keys[..1], + "response should match newly registered key" + ); + } + + _ => panic!("scan service should have responded with the `RegisteredKeys` response"), + } + + // Try registering all three keys. + match scan_service + .ready() + .await + .map_err(|err| eyre!(err))? + .call(Request::RegisterKeys(keys_to_register)) + .await + .map_err(|err| eyre!(err))? + { + Response::RegisteredKeys(registered_keys) => { + // Only the last two keys should be registered in this service call since the first one + // was registered in the previous call. + assert_eq!( + registered_keys, + mocked_keys[1..3], + "response should match newly registered keys" + ); + } + + _ => panic!("scan service should have responded with the `RegisteredKeys` response"), + } + + Ok(()) +} diff --git a/zebra-scan/src/storage.rs b/zebra-scan/src/storage.rs new file mode 100644 index 00000000000..2372957014e --- /dev/null +++ b/zebra-scan/src/storage.rs @@ -0,0 +1,126 @@ +//! Store viewing keys and results of the scan. 
+
+use std::collections::{BTreeMap, HashMap};
+
+use zebra_chain::{block::Height, parameters::Network};
+use zebra_state::TransactionIndex;
+
+use crate::config::Config;
+
+pub mod db;
+
+// Public types and APIs
+pub use db::{SaplingScannedResult, SaplingScanningKey};
+
+/// We insert an empty results entry into the database every `INSERT_CONTROL_INTERVAL` blocks
+/// for each stored key, so we can track progress.
+pub const INSERT_CONTROL_INTERVAL: u32 = 1_000;
+
+/// Store key info and results of the scan.
+///
+/// `rocksdb` allows concurrent writes through a shared reference,
+/// so clones of the scanner storage represent the same database instance.
+/// When the final clone is dropped, the database is closed.
+#[derive(Clone, Debug)]
+pub struct Storage {
+    // Configuration
+    //
+    // This configuration cannot be modified after the database is initialized,
+    // because some clones would have different values.
+    //
+    // TODO: add config if needed?
+
+    // Owned State
+    //
+    // Everything contained in this state must be shared by all clones, or read-only.
+    //
+    /// The underlying database.
+    ///
+    /// `rocksdb` allows reads and writes via a shared reference,
+    /// so this database object can be freely cloned.
+    /// The last instance that is dropped will close the underlying database.
+    db: db::ScannerDb,
+}
+
+impl Storage {
+    /// Opens and returns the on-disk scanner results storage for `config` and `network`.
+    /// If there is no existing storage, creates a new storage on disk.
+    ///
+    /// Birthdays and scanner progress are marked by inserting an empty result for that height.
+    ///
+    /// # Performance / Hangs
+    ///
+    /// This method can block while creating or reading database files, so it must be inside
+    /// spawn_blocking() in async code.
+    pub fn new(config: &Config, network: Network, read_only: bool) -> Self {
+        let mut storage = Self::new_db(config, network, read_only);
+
+        for (sapling_key, birthday) in config.sapling_keys_to_scan.iter() {
+            storage.add_sapling_key(sapling_key, Some(zebra_chain::block::Height(*birthday)));
+        }
+
+        storage
+    }
+
+    /// Add a sapling key to the storage.
+    ///
+    /// # Performance / Hangs
+    ///
+    /// This method can block while writing database files, so it must be inside spawn_blocking()
+    /// in async code.
+    pub fn add_sapling_key(
+        &mut self,
+        sapling_key: &SaplingScanningKey,
+        birthday: impl Into<Option<Height>>,
+    ) {
+        let birthday = birthday.into();
+
+        // It's ok to write some keys and not others during shutdown, so each key can get its own
+        // batch. (They will be re-written on startup anyway.)
+        self.insert_sapling_key(sapling_key, birthday);
+    }
+
+    /// Returns all the keys and their last scanned heights.
+    ///
+    /// # Performance / Hangs
+    ///
+    /// This method can block while reading database files, so it must be inside spawn_blocking()
+    /// in async code.
+    pub fn sapling_keys_last_heights(&self) -> HashMap<SaplingScanningKey, Height> {
+        self.sapling_keys_and_last_scanned_heights()
+    }
+
+    /// Add the sapling results for `height` to the storage. The results can be any map of
+    /// [`TransactionIndex`] to [`SaplingScannedResult`].
+    ///
+    /// All the results for the same height must be written at the same time, to avoid partial
+    /// writes during shutdown.
+    ///
+    /// Also adds empty progress tracking entries every `INSERT_CONTROL_INTERVAL` blocks if needed.
+    ///
+    /// # Performance / Hangs
+    ///
+    /// This method can block while writing database files, so it must be inside spawn_blocking()
+    /// in async code.
+    pub fn add_sapling_results(
+        &mut self,
+        sapling_key: &SaplingScanningKey,
+        height: Height,
+        sapling_results: BTreeMap<TransactionIndex, SaplingScannedResult>,
+    ) {
+        self.insert_sapling_results(sapling_key, height, sapling_results)
+    }
+
+    /// Returns all the results for a sapling key, for every scanned block height.
+    ///
+    /// # Performance / Hangs
+    ///
+    /// This method can block while reading database files, so it must be inside spawn_blocking()
+    /// in async code.
+    pub fn sapling_results(
+        &self,
+        sapling_key: &SaplingScanningKey,
+    ) -> BTreeMap<Height, Vec<SaplingScannedResult>> {
+        self.sapling_results_for_key(sapling_key)
+    }
+}
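Before the low-level database code, here is a hedged sketch of the high-level `Storage` API defined above, in a blocking context, using an ephemeral config as the tests later in this diff do. The key string is a placeholder, not a real viewing key, and the birthday height assumes mainnet Sapling activation (419,200).

```rust
// Illustrative sketch only (not part of this PR).
fn storage_example(network: Network) {
    // Opening the database can block, so async callers should wrap this in
    // `spawn_blocking`, as the doc comments above require.
    let mut storage = Storage::new(&Config::ephemeral(), network, false);

    // A placeholder key string; real keys are Sapling extended full viewing keys.
    let key: SaplingScanningKey = "hypothetical-key".to_string();
    storage.add_sapling_key(&key, Some(Height(419_200)));

    // The birthday is stored as an empty marker just below the birthday height,
    // so the key's last scanned height starts at 419_199.
    let last_heights = storage.sapling_keys_last_heights();
    assert_eq!(last_heights.get(&key), Some(&Height(419_199)));
}
```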
diff --git a/zebra-scan/src/storage/db.rs b/zebra-scan/src/storage/db.rs
new file mode 100644
index 00000000000..784ffb3d70f
--- /dev/null
+++ b/zebra-scan/src/storage/db.rs
@@ -0,0 +1,138 @@
+//! Persistent storage for scanner results.
+
+use std::path::Path;
+
+use semver::Version;
+
+use zebra_chain::parameters::Network;
+
+use crate::Config;
+
+use super::Storage;
+
+// Public types and APIs
+pub use zebra_state::{
+    SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult,
+    SaplingScanningKey, ZebraDb as ScannerDb,
+};
+
+pub mod sapling;
+
+#[cfg(any(test, feature = "proptest-impl"))]
+pub mod tests;
+
+/// The directory name used to distinguish the scanner database from Zebra's other databases or
+/// flat files.
+///
+/// We use "private" in the name to warn users not to share this data.
+pub const SCANNER_DATABASE_KIND: &str = "private-scan";
+
+/// The column families supported by the running `zebra-scan` database code.
+///
+/// Existing column families that aren't listed here are preserved when the database is opened.
+pub const SCANNER_COLUMN_FAMILIES_IN_CODE: &[&str] = &[
+    // Sapling
+    sapling::SAPLING_TX_IDS,
+    // Orchard
+    // TODO: add Orchard support
+];
+
+/// The major version number of the scanner database. This must be updated whenever the database
+/// format changes.
+const SCANNER_DATABASE_FORMAT_MAJOR_VERSION: u64 = 1;
+
+impl Storage {
+    // Creation
+
+    /// Opens and returns an on-disk scanner results database instance for `config` and `network`.
+    /// If there is no existing database, creates a new database on disk.
+    ///
+    /// New keys in `config` are not inserted into the database.
+    pub(crate) fn new_db(config: &Config, network: Network, read_only: bool) -> Self {
+        Self::new_with_debug(
+            config, network,
+            // TODO: make format upgrades work with any database, then change debug_skip_format_upgrades to `false`
+            true, read_only,
+        )
+    }
+
+    /// Returns an on-disk database instance with the supplied production and debug settings.
+    /// If there is no existing database, creates a new database on disk.
+    ///
+    /// New keys in `config` are not inserted into the database.
+    ///
+    /// This method is intended for use in tests.
+    pub(crate) fn new_with_debug(
+        config: &Config,
+        network: Network,
+        debug_skip_format_upgrades: bool,
+        read_only: bool,
+    ) -> Self {
+        let db = ScannerDb::new(
+            config.db_config(),
+            SCANNER_DATABASE_KIND,
+            &Self::database_format_version_in_code(),
+            network,
+            debug_skip_format_upgrades,
+            SCANNER_COLUMN_FAMILIES_IN_CODE
+                .iter()
+                .map(ToString::to_string),
+            read_only,
+        );
+
+        let new_storage = Self { db };
+
+        // Report where we are for each key in the database.
+        let keys = new_storage.sapling_keys_last_heights();
+        for (key_num, (_key, height)) in keys.iter().enumerate() {
+            info!(
+                "Last scanned height for key number {} is {}, resuming at {}",
+                key_num,
+                height.as_usize(),
+                height.next().expect("height is not maximum").as_usize(),
+            );
+        }
+
+        info!("loaded Zebra scanner cache");
+
+        new_storage
+    }
+
+    // Config
+
+    /// Returns the configured network for this database.
+    pub fn network(&self) -> Network {
+        self.db.network()
+    }
+
+    /// Returns the `Path` where the files used by this database are located.
+    pub fn path(&self) -> &Path {
+        self.db.path()
+    }
+
+    // Versioning & Upgrades
+
+    /// The database format version in the running scanner code.
+    pub fn database_format_version_in_code() -> Version {
+        // TODO: implement in-place scanner database format upgrades
+        Version::new(SCANNER_DATABASE_FORMAT_MAJOR_VERSION, 0, 0)
+    }
+
+    /// Check for panics in code running in spawned threads.
+    /// If a thread exited with a panic, resume that panic.
+    ///
+    /// This method should be called regularly, so that panics are detected as soon as possible.
+    //
+    // TODO: when we implement format changes, call this method regularly
+    pub fn check_for_panics(&mut self) {
+        self.db.check_for_panics()
+    }
+
+    // General database status
+
+    /// Returns true if the database is empty.
+    pub fn is_empty(&self) -> bool {
+        // Any column family that is populated at (or near) startup can be used here.
+        self.sapling_tx_ids_cf().zs_is_empty()
+    }
+}
diff --git a/zebra-scan/src/storage/db/sapling.rs b/zebra-scan/src/storage/db/sapling.rs
new file mode 100644
index 00000000000..b3c7b870b42
--- /dev/null
+++ b/zebra-scan/src/storage/db/sapling.rs
@@ -0,0 +1,301 @@
+//! Sapling-specific database reading and writing.
+//!
+//! The sapling scanner database has the following format:
+//!
+//! | name               | Reading & Writing Key/Values                    |
+//! |--------------------|-------------------------------------------------|
+//! | [`SAPLING_TX_IDS`] | [`SaplingTxIdsCf`] & [`WriteSaplingTxIdsBatch`] |
+//!
+//! And types:
+//! `SaplingScannedResult`: same as `transaction::Hash`, but with bytes in display order.
+//! `None` is stored as a zero-length array of bytes.
+//!
+//! `SaplingScannedDatabaseIndex` = `SaplingScanningKey` | `TransactionLocation`
+//! `TransactionLocation` = `Height` | `TransactionIndex`
+//!
+//! This format allows us to efficiently find all the results for each key, and the latest height
+//! for each key.
+//!
+//! If there are no results for a height, we store `None` as the result for the coinbase
+//! transaction. This allows us to scan each key from the next height after we restart. We also
+//! use this mechanism to store key birthday heights, by storing the height before the birthday
+//! as the "last scanned" block.
+
+use std::{
+    collections::{BTreeMap, HashMap},
+    ops::RangeBounds,
+};
+
+use itertools::Itertools;
+
+use zebra_chain::block::Height;
+use zebra_state::{
+    DiskWriteBatch, SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult,
+    SaplingScanningKey, TransactionIndex, TransactionLocation, TypedColumnFamily, WriteTypedBatch,
+};
+
+use crate::storage::{Storage, INSERT_CONTROL_INTERVAL};
+
+/// The name of the sapling transaction IDs result column family.
+///
+/// This constant should be used so the compiler can detect typos.
+pub const SAPLING_TX_IDS: &str = "sapling_tx_ids";
+
+/// The type for reading sapling transaction IDs results from the database.
+///
+/// This type should be used so the compiler can detect incorrectly typed accesses to the
+/// column family.
+pub type SaplingTxIdsCf<'cf> =
+    TypedColumnFamily<'cf, SaplingScannedDatabaseIndex, Option<SaplingScannedResult>>;
+
+/// The type for writing sapling transaction IDs results to the database.
+///
+/// This type should be used so the compiler can detect incorrectly typed accesses to the
+/// column family.
+pub type WriteSaplingTxIdsBatch<'cf> = WriteTypedBatch<
+    'cf,
+    SaplingScannedDatabaseIndex,
+    Option<SaplingScannedResult>,
+    DiskWriteBatch,
+>;
+
+impl Storage {
+    // Reading Sapling database entries
+
+    /// Returns the result for a specific database index (key, block height, transaction index).
+    /// Returns `None` if the result is missing or an empty marker for a birthday or progress
+    /// height.
+    pub fn sapling_result_for_index(
+        &self,
+        index: &SaplingScannedDatabaseIndex,
+    ) -> Option<SaplingScannedResult> {
+        self.sapling_tx_ids_cf().zs_get(index).flatten()
+    }
+
+    /// Returns the results for a specific key and block height.
+    pub fn sapling_results_for_key_and_height(
+        &self,
+        sapling_key: &SaplingScanningKey,
+        height: Height,
+    ) -> BTreeMap<TransactionIndex, Option<SaplingScannedResult>> {
+        let kh_min = SaplingScannedDatabaseIndex::min_for_key_and_height(sapling_key, height);
+        let kh_max = SaplingScannedDatabaseIndex::max_for_key_and_height(sapling_key, height);
+
+        self.sapling_results_in_range(kh_min..=kh_max)
+            .into_iter()
+            .map(|(result_index, txid)| (result_index.tx_loc.index, txid))
+            .collect()
+    }
+
+    /// Returns all the results for a specific key, indexed by height.
+    pub fn sapling_results_for_key(
+        &self,
+        sapling_key: &SaplingScanningKey,
+    ) -> BTreeMap<Height, Vec<SaplingScannedResult>> {
+        let k_min = SaplingScannedDatabaseIndex::min_for_key(sapling_key);
+        let k_max = SaplingScannedDatabaseIndex::max_for_key(sapling_key);
+
+        // Get an iterator of individual transaction results, and turn it into a HashMap by height
+        let results: HashMap<Height, Vec<Option<SaplingScannedResult>>> = self
+            .sapling_results_in_range(k_min..=k_max)
+            .into_iter()
+            .map(|(index, result)| (index.tx_loc.height, result))
+            .into_group_map();
+
+        // But we want `Vec<SaplingScannedResult>`, with empty Vecs instead of `[None, None, ...]`
+        results
+            .into_iter()
+            .map(|(index, vector)| -> (Height, Vec<SaplingScannedResult>) {
+                (index, vector.into_iter().flatten().collect())
+            })
+            .collect()
+    }
+
+    /// Returns all the keys and their last scanned heights.
+    pub fn sapling_keys_and_last_scanned_heights(&self) -> HashMap<SaplingScanningKey, Height> {
+        let sapling_tx_ids = self.sapling_tx_ids_cf();
+        let mut keys = HashMap::new();
+
+        let mut last_stored_record = sapling_tx_ids.zs_last_key_value();
+
+        while let Some((last_stored_record_index, _result)) = last_stored_record {
+            let sapling_key = last_stored_record_index.sapling_key.clone();
+            let height = last_stored_record_index.tx_loc.height;
+
+            let prev_height = keys.insert(sapling_key.clone(), height);
+            assert_eq!(
+                prev_height, None,
+                "unexpected duplicate key: keys must only be inserted once \
+                 last_stored_record_index: {last_stored_record_index:?}",
+            );
+
+            // Skip all the results until the next key.
+            last_stored_record = sapling_tx_ids.zs_prev_key_value_strictly_before(
+                &SaplingScannedDatabaseIndex::min_for_key(&sapling_key),
+            );
+        }
+
+        keys
+    }
+
+    /// Returns the Sapling indexes and results in the supplied range.
+    ///
+    /// Convenience method for accessing raw data with the correct types.
+    fn sapling_results_in_range(
+        &self,
+        range: impl RangeBounds<SaplingScannedDatabaseIndex>,
+    ) -> BTreeMap<SaplingScannedDatabaseIndex, Option<SaplingScannedResult>> {
+        self.sapling_tx_ids_cf().zs_items_in_range_ordered(range)
+    }
+
+    // Column family convenience methods
+
+    /// Returns a typed handle to the `sapling_tx_ids` column family.
+    pub(crate) fn sapling_tx_ids_cf(&self) -> SaplingTxIdsCf {
+        SaplingTxIdsCf::new(&self.db, SAPLING_TX_IDS)
+            .expect("column family was created when database was created")
+    }
+
+    // Writing database entries
+    //
+    // To avoid exposing internal types, and accidentally forgetting to write a batch,
+    // each pub(crate) write method should write an entire batch.
+
+    /// Inserts a batch of scanned sapling results for a key and height.
+    /// If a result already exists for that key, height, and index, it is replaced.
+    pub fn insert_sapling_results(
+        &mut self,
+        sapling_key: &SaplingScanningKey,
+        height: Height,
+        sapling_results: BTreeMap<TransactionIndex, SaplingScannedResult>,
+    ) {
+        // We skip key heights that have one or more results, so the results for each key height
+        // must be in a single batch.
+        let mut batch = self.sapling_tx_ids_cf().new_batch_for_writing();
+
+        // Every `INSERT_CONTROL_INTERVAL` we add a new entry to the scanner database for each key
+        // so we can track progress made in the last interval even if no transaction was yet found.
+        let needs_control_entry =
+            height.0 % INSERT_CONTROL_INTERVAL == 0 && sapling_results.is_empty();
+
+        // Add a scanner progress tracking entry for the key.
+        // Defensive programming: add the tracking entry first, so that we don't accidentally
+        // overwrite real results with it. (This is currently prevented by the empty check.)
+        if needs_control_entry {
+            batch = batch.insert_sapling_height(sapling_key, height);
+        }
+
+        for (index, sapling_result) in sapling_results {
+            let index = SaplingScannedDatabaseIndex {
+                sapling_key: sapling_key.clone(),
+                tx_loc: TransactionLocation::from_parts(height, index),
+            };
+
+            let entry = SaplingScannedDatabaseEntry {
+                index,
+                value: Some(sapling_result),
+            };
+
+            batch = batch.zs_insert(&entry.index, &entry.value);
+        }
+
+        batch
+            .write_batch()
+            .expect("unexpected database write failure");
+    }
+
+    /// Insert a sapling scanning `key`, and mark all heights before `birthday_height` so they
+    /// won't be scanned.
+    ///
+    /// If a result already exists for the coinbase transaction at the height before the birthday,
+    /// it is replaced with an empty result. This can happen if the user increases the birthday
+    /// height.
+    ///
+    /// TODO: ignore incorrect changes to birthday heights
+    pub(crate) fn insert_sapling_key(
+        &mut self,
+        sapling_key: &SaplingScanningKey,
+        birthday_height: Option<Height>,
+    ) {
+        let min_birthday_height = self.network().sapling_activation_height();
+
+        // The birthday height must be at least the minimum height for that pool.
+        let birthday_height = birthday_height
+            .unwrap_or(min_birthday_height)
+            .max(min_birthday_height);
+        // And we want to skip up to the height before it.
+        let skip_up_to_height = birthday_height.previous().unwrap_or(Height::MIN);
+
+        // It's ok to write some keys and not others during shutdown, so each key can get its own
+        // batch. (They will be re-written on startup anyway.)
+        //
+        // TODO: ignore incorrect changes to birthday heights,
+        //       and redundant birthday heights
+        self.sapling_tx_ids_cf()
+            .new_batch_for_writing()
+            .insert_sapling_height(sapling_key, skip_up_to_height)
+            .write_batch()
+            .expect("unexpected database write failure");
+    }
+
+    /// Delete the sapling keys and their results, if they exist.
+    pub fn delete_sapling_keys(&mut self, keys: Vec<SaplingScanningKey>) {
+        self.sapling_tx_ids_cf()
+            .new_batch_for_writing()
+            .delete_sapling_keys(keys)
+            .write_batch()
+            .expect("unexpected database write failure");
+    }
+
+    /// Delete the results of sapling scanning `keys`, if they exist
+    pub(crate) fn delete_sapling_results(&mut self, keys: Vec<SaplingScanningKey>) {
+        let mut batch = self
+            .sapling_tx_ids_cf()
+            .new_batch_for_writing()
+            .delete_sapling_keys(keys.clone());
+
+        for key in &keys {
+            batch = batch.insert_sapling_height(key, Height::MIN);
+        }
+
+        batch
+            .write_batch()
+            .expect("unexpected database write failure");
+    }
+}
+
+/// Utility trait for inserting sapling heights into a [`WriteSaplingTxIdsBatch`].
+trait InsertSaplingHeight {
+    fn insert_sapling_height(self, sapling_key: &SaplingScanningKey, height: Height) -> Self;
+}
+
+impl<'cf> InsertSaplingHeight for WriteSaplingTxIdsBatch<'cf> {
+    /// Insert a sapling height with no results.
+    ///
+    /// If a result already exists for the coinbase transaction at that height,
+    /// it is replaced with an empty result. This should never happen.
+    fn insert_sapling_height(self, sapling_key: &SaplingScanningKey, height: Height) -> Self {
+        let index = SaplingScannedDatabaseIndex::min_for_key_and_height(sapling_key, height);
+
+        // TODO: assert that we don't overwrite any entries here.
+        self.zs_insert(&index, &None)
+    }
+}
+
+/// Utility trait for deleting sapling keys in a [`WriteSaplingTxIdsBatch`].
+trait DeleteSaplingKeys {
+    fn delete_sapling_keys(self, sapling_keys: Vec<SaplingScanningKey>) -> Self;
+}
+
+impl<'cf> DeleteSaplingKeys for WriteSaplingTxIdsBatch<'cf> {
+    /// Delete sapling keys and their results.
+    fn delete_sapling_keys(mut self, sapling_keys: Vec<SaplingScanningKey>) -> Self {
+        for key in &sapling_keys {
+            let from_index = SaplingScannedDatabaseIndex::min_for_key(key);
+            let until_strictly_before_index = SaplingScannedDatabaseIndex::max_for_key(key);
+
+            self = self
+                .zs_delete_range(&from_index, &until_strictly_before_index)
+                // TODO: convert zs_delete_range() to take std::ops::RangeBounds
+                .zs_delete(&until_strictly_before_index);
+        }
+
+        self
+    }
+}
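The index helpers above rely on the `key | height | tx_index` ordering described in the module docs. As a hedged, crate-internal sketch (not part of this diff; `sapling_tx_ids_cf` is `pub(crate)`), this is how a range over that ordering selects every entry for one key:

```rust
// Illustrative sketch only (not part of this PR).
fn all_entries_for_key(
    storage: &Storage,
    key: &SaplingScanningKey,
) -> BTreeMap<SaplingScannedDatabaseIndex, Option<SaplingScannedResult>> {
    // `min_for_key`/`max_for_key` bound every `TransactionLocation` for `key`,
    // because the index sorts by key first, then height, then tx index.
    let min = SaplingScannedDatabaseIndex::min_for_key(key);
    let max = SaplingScannedDatabaseIndex::max_for_key(key);

    storage.sapling_tx_ids_cf().zs_items_in_range_ordered(min..=max)
}
```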
diff --git a/zebra-scan/src/storage/db/tests.rs b/zebra-scan/src/storage/db/tests.rs
new file mode 100644
index 00000000000..f34650ab262
--- /dev/null
+++ b/zebra-scan/src/storage/db/tests.rs
@@ -0,0 +1,96 @@
+//! General scanner database tests.
+
+use std::{collections::BTreeMap, sync::Arc};
+
+use zebra_chain::{
+    block::{Block, Height},
+    parameters::Network::{self, *},
+    serialization::ZcashDeserializeInto,
+    transaction,
+};
+use zebra_state::{SaplingScannedResult, TransactionIndex};
+
+use crate::{
+    storage::{Storage, INSERT_CONTROL_INTERVAL},
+    tests::{FAKE_SAPLING_VIEWING_KEY, ZECPAGES_SAPLING_VIEWING_KEY},
+    Config,
+};
+
+#[cfg(test)]
+mod snapshot;
+
+#[cfg(test)]
+mod vectors;
+
+/// Returns an empty `Storage` suitable for testing.
+pub fn new_test_storage(network: Network) -> Storage {
+    Storage::new(&Config::ephemeral(), network, false)
+}
+
+/// Add fake keys to `storage` for testing purposes.
+pub fn add_fake_keys(storage: &mut Storage) {
+    // Snapshot a birthday that is automatically set to the activation height.
+    storage.add_sapling_key(&ZECPAGES_SAPLING_VIEWING_KEY.to_string(), None);
+    // Snapshot a birthday above the activation height.
+    storage.add_sapling_key(&FAKE_SAPLING_VIEWING_KEY.to_string(), Height(1_000_000));
+}
+
+/// Add fake results to `storage` for testing purposes.
+///
+/// If `add_progress_marker` is `true`, adds a progress marker.
+/// If it is `false`, adds the transaction hashes from `height`.
+pub fn add_fake_results(
+    storage: &mut Storage,
+    network: Network,
+    height: Height,
+    add_progress_marker: bool,
+) {
+    let blocks = match network {
+        Mainnet => &*zebra_test::vectors::CONTINUOUS_MAINNET_BLOCKS,
+        Testnet => &*zebra_test::vectors::CONTINUOUS_TESTNET_BLOCKS,
+    };
+
+    let block: Arc<Block> = blocks
+        .get(&height.0)
+        .expect("block height has test data")
+        .zcash_deserialize_into()
+        .expect("test data deserializes");
+
+    if add_progress_marker {
+        // Fake a progress marker.
+        let next_progress_height = height.0.next_multiple_of(INSERT_CONTROL_INTERVAL);
+        storage.add_sapling_results(
+            &FAKE_SAPLING_VIEWING_KEY.to_string(),
+            Height(next_progress_height),
+            BTreeMap::new(),
+        );
+    } else {
+        // Fake results from the block at `height`.
+        storage.add_sapling_results(
+            &ZECPAGES_SAPLING_VIEWING_KEY.to_string(),
+            height,
+            block
+                .transactions
+                .iter()
+                .enumerate()
+                .map(|(index, tx)| (TransactionIndex::from_usize(index), tx.hash().into()))
+                .collect(),
+        );
+    }
+}
+
+/// Accepts an iterator of [`TransactionIndex`]es, and returns a `BTreeMap` with a placeholder
+/// result for each index.
+pub fn fake_sapling_results<T: IntoIterator<Item = TransactionIndex>>(
+    transaction_indexes: T,
+) -> BTreeMap<TransactionIndex, SaplingScannedResult> {
+    let mut fake_sapling_results = BTreeMap::new();
+
+    for transaction_index in transaction_indexes {
+        fake_sapling_results.insert(
+            transaction_index,
+            SaplingScannedResult::from(transaction::Hash::from([0; 32])),
+        );
+    }
+
+    fake_sapling_results
+}
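The helpers above are meant to compose; a typical call sequence looks like the following sketch (illustrative only, using the items defined in this file):

// Illustrative composition of the helpers above, not part of the patch:
#[test]
fn helper_composition_sketch() {
    let mut storage = new_test_storage(Network::Mainnet);
    add_fake_keys(&mut storage);

    // Record placeholder results for two transactions at height 1.
    storage.add_sapling_results(
        &ZECPAGES_SAPLING_VIEWING_KEY.to_string(),
        Height(1),
        fake_sapling_results([TransactionIndex::MIN, TransactionIndex::from_index(1)]),
    );
}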
diff --git a/zebra-scan/src/storage/db/tests/snapshot.rs b/zebra-scan/src/storage/db/tests/snapshot.rs
new file mode 100644
index 00000000000..f3f18e2ef03
--- /dev/null
+++ b/zebra-scan/src/storage/db/tests/snapshot.rs
@@ -0,0 +1,197 @@
+//! Raw data snapshot tests for the scanner database format.
+//!
+//! These tests check:
+//! - the name of each column family
+//! - the number of key-value entries
+//! - the bytes in each key and value
+//!
+//! These tests currently use fixed test vectors.
+//!
+//! # Fixing Test Failures
+//!
+//! If this test fails, run:
+//! ```sh
+//! cd zebra-scan
+//! cargo insta test --review --features shielded-scan
+//! ```
+//! to update the test snapshots, then commit the `test_*.snap` files using git.
+//!
+//! # Snapshot Format
+//!
+//! These snapshots use [RON (Rusty Object Notation)](https://github.com/ron-rs/ron#readme),
+//! a text format similar to Rust syntax. Raw byte data is encoded in hexadecimal.
+//!
+//! Due to `serde` limitations, some object types can't be represented exactly,
+//! so RON uses the closest equivalent structure.
+
+use std::collections::BTreeMap;
+
+use itertools::Itertools;
+
+use zebra_chain::{
+    block::Height,
+    parameters::Network::{self, *},
+};
+use zebra_state::{RawBytes, ReadDisk, SaplingScannedDatabaseIndex, TransactionLocation, KV};
+
+use crate::storage::{db::ScannerDb, Storage};
+
+/// Snapshot test for:
+/// - RocksDB column families, and their raw key-value data, and
+/// - typed scanner result data using high-level storage methods.
+///
+/// These snapshots contain the `default` column family, but it is not used by Zebra.
+#[test]
+fn test_database_format() {
+    let _init_guard = zebra_test::init();
+
+    test_database_format_with_network(Mainnet);
+    test_database_format_with_network(Testnet);
+}
+
+/// Snapshot raw and typed database formats for `network`.
+///
+/// See [`test_database_format()`] for details.
+fn test_database_format_with_network(network: Network) {
+    let mut net_suffix = network.to_string();
+    net_suffix.make_ascii_lowercase();
+
+    let mut storage = super::new_test_storage(network);
+
+    // Snapshot the column family names.
+    let mut cf_names = storage.db.list_cf().expect("empty database is valid");
+
+    // The order that RocksDB returns column families is irrelevant,
+    // because we always access them by name.
+    cf_names.sort();
+
+    // Assert that column family names are the same, regardless of the network.
+    // Later, we check they are also the same regardless of the block height.
+    insta::assert_ron_snapshot!("column_family_names", cf_names);
+
+    // Assert that empty databases are the same, regardless of the network.
+    let mut settings = insta::Settings::clone_current();
+
+    settings.set_snapshot_suffix("empty");
+    settings.bind(|| snapshot_raw_rocksdb_column_family_data(&storage.db, &cf_names));
+    settings.bind(|| snapshot_typed_result_data(&storage));
+
+    super::add_fake_keys(&mut storage);
+
+    // Assert that the key format doesn't change.
+    settings.set_snapshot_suffix(format!("{net_suffix}_keys"));
+    settings.bind(|| snapshot_raw_rocksdb_column_family_data(&storage.db, &cf_names));
+    settings.bind(|| snapshot_typed_result_data(&storage));
+
+    // Snapshot raw database data for:
+    // - mainnet and testnet
+    // - genesis, block 1, and block 2
+    //
+    // We limit the number of blocks, because we create 2 snapshots per block, one for each network.
+    for height in 0..=2 {
+        super::add_fake_results(&mut storage, network, Height(height), true);
+        super::add_fake_results(&mut storage, network, Height(height), false);
+
+        let mut settings = insta::Settings::clone_current();
+        settings.set_snapshot_suffix(format!("{net_suffix}_{height}"));
+
+        // Assert that the result format doesn't change.
+        settings.bind(|| snapshot_raw_rocksdb_column_family_data(&storage.db, &cf_names));
+        settings.bind(|| snapshot_typed_result_data(&storage));
+    }
+}
+
+/// Snapshot the data in each column family, using `cargo insta` and RON serialization.
+fn snapshot_raw_rocksdb_column_family_data(db: &ScannerDb, original_cf_names: &[String]) {
+    let mut new_cf_names = db.list_cf().expect("empty database is valid");
+    new_cf_names.sort();
+
+    // Assert that column family names are the same, regardless of the network or block height.
+    assert_eq!(
+        original_cf_names, new_cf_names,
+        "unexpected extra column families",
+    );
+
+    let mut empty_column_families = Vec::new();
+
+    // Now run the data snapshots.
+    for cf_name in original_cf_names {
+        let cf_handle = db
+            .cf_handle(cf_name)
+            .expect("RocksDB API provides correct names");
+
+        // Correctness: Multi-key iteration causes hangs in concurrent code, but seems ok in tests.
+        let cf_items: BTreeMap<RawBytes, RawBytes> = db.zs_items_in_range_ordered(&cf_handle, ..);
+
+        // The default raw data serialization is very verbose, so we hex-encode the bytes.
+        let cf_data: Vec<KV> = cf_items
+            .iter()
+            .map(|(key, value)| KV::new(key.raw_bytes(), value.raw_bytes()))
+            .collect();
+
+        if cf_name == "default" {
+            assert_eq!(cf_data.len(), 0, "default column family is never used");
+        } else if cf_data.is_empty() {
+            // Distinguish column family names from empty column families.
+            empty_column_families.push(format!("{cf_name}: no entries"));
+        } else {
+            // Make sure the raw format doesn't accidentally change.
+            insta::assert_ron_snapshot!(format!("{cf_name}_raw_data"), cf_data);
+        }
+    }
+
+    insta::assert_ron_snapshot!("empty_column_families", empty_column_families);
+}
+
+/// Snapshot typed scanner result data using high-level storage methods,
+/// using `cargo insta` and RON serialization.
+fn snapshot_typed_result_data(storage: &Storage) {
+    // Make sure the typed key format doesn't accidentally change.
+    let sapling_keys_last_heights = storage.sapling_keys_last_heights();
+
+    // HashMap has an unstable order across Rust releases, so we need to sort it here.
+    insta::assert_ron_snapshot!(
+        "sapling_keys",
+        sapling_keys_last_heights,
+        {
+            "." => insta::sorted_redaction()
+        }
+    );
+
+    // HashMap has an unstable order across Rust releases, so we need to sort it here as well.
+    for (key_index, (sapling_key, last_height)) in
+        sapling_keys_last_heights.iter().sorted().enumerate()
+    {
+        let sapling_results = storage.sapling_results(sapling_key);
+
+        assert_eq!(sapling_results.keys().max(), Some(last_height));
+
+        // Check internal database method consistency.
+        for (height, results) in sapling_results.iter() {
+            let sapling_index_and_results =
+                storage.sapling_results_for_key_and_height(sapling_key, *height);
+
+            // The list of results for each height must match the results queried by that height.
+            let sapling_results_for_height: Vec<_> = sapling_index_and_results
+                .values()
+                .flatten()
+                .cloned()
+                .collect();
+            assert_eq!(results, &sapling_results_for_height);
+
+            for (index, result) in sapling_index_and_results {
+                let index = SaplingScannedDatabaseIndex {
+                    sapling_key: sapling_key.clone(),
+                    tx_loc: TransactionLocation::from_parts(*height, index),
+                };
+
+                // The result for each index must match the result queried by that index.
+                let sapling_result_for_index = storage.sapling_result_for_index(&index);
+                assert_eq!(result, sapling_result_for_index);
+            }
+        }
+
+        // Make sure the typed result format doesn't accidentally change.
+ insta::assert_ron_snapshot!(format!("sapling_key_{key_index}_results"), sapling_results); + } +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/column_family_names.snap b/zebra-scan/src/storage/db/tests/snapshots/column_family_names.snap new file mode 100644 index 00000000000..dc65b126d9c --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/column_family_names.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_names +--- +[ + "default", + "sapling_tx_ids", +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@empty.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@empty.snap new file mode 100644 index 00000000000..df6ef1d5f19 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@empty.snap @@ -0,0 +1,7 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[ + "sapling_tx_ids: no entries", +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_0.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_0.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_1.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_1.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_2.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_2.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_keys.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@mainnet_keys.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_0.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_0.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_1.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_1.snap @@ -0,0 +1,5 @@ +--- 
+source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_2.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_2.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_keys.snap new file mode 100644 index 00000000000..15b9ea627c6 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/empty_column_families@testnet_keys.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: empty_column_families +--- +[] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_0.snap new file mode 100644 index 00000000000..c20e1d132f7 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [ + SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"), + ], + Height(419199): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_1.snap new file mode 100644 index 00000000000..421019cf1e0 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_1.snap @@ -0,0 +1,13 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [ + SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"), + ], + Height(1): [ + SaplingScannedResult("851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609"), + ], + Height(419199): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_2.snap new file mode 100644 index 00000000000..6a65371ba0c --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_2.snap @@ -0,0 +1,16 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [ + SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"), + ], + Height(1): [ + SaplingScannedResult("851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609"), + ], + Height(2): [ + SaplingScannedResult("8974d08d1c5f9c860d8b629d582a56659a4a1dcb2b5f98a25a5afcc2a784b0f4"), + ], + Height(419199): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_keys.snap new file mode 100644 index 00000000000..1424368e285 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@mainnet_keys.snap @@ -0,0 +1,7 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(419199): [], +} diff --git 
a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_0.snap new file mode 100644 index 00000000000..935314fdce3 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [ + SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"), + ], + Height(279999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_1.snap new file mode 100644 index 00000000000..ef7148e91e9 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_1.snap @@ -0,0 +1,13 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [ + SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"), + ], + Height(1): [ + SaplingScannedResult("f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75"), + ], + Height(279999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_2.snap new file mode 100644 index 00000000000..886c68d92cc --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_2.snap @@ -0,0 +1,16 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [ + SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"), + ], + Height(1): [ + SaplingScannedResult("f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75"), + ], + Height(2): [ + SaplingScannedResult("5822c0532da8a008259ac39933d3210e508c17e3ba21d2b2c428785efdccb3d5"), + ], + Height(279999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_keys.snap new file mode 100644 index 00000000000..13543009987 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_0_results@testnet_keys.snap @@ -0,0 +1,7 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(279999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_0.snap new file mode 100644 index 00000000000..160153b3123 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_0.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [], + Height(999999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_1.snap new file mode 100644 index 00000000000..c48c115fad9 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_1.snap @@ -0,0 +1,9 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [], + Height(1000): [], + Height(999999): [], +} diff --git 
a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_2.snap new file mode 100644 index 00000000000..c48c115fad9 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_2.snap @@ -0,0 +1,9 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [], + Height(1000): [], + Height(999999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_keys.snap new file mode 100644 index 00000000000..2b1f3dc5dfe --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@mainnet_keys.snap @@ -0,0 +1,7 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(999999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_0.snap new file mode 100644 index 00000000000..160153b3123 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_0.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [], + Height(999999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_1.snap new file mode 100644 index 00000000000..c48c115fad9 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_1.snap @@ -0,0 +1,9 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [], + Height(1000): [], + Height(999999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_2.snap new file mode 100644 index 00000000000..c48c115fad9 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_2.snap @@ -0,0 +1,9 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(0): [], + Height(1000): [], + Height(999999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_keys.snap new file mode 100644 index 00000000000..2b1f3dc5dfe --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_key_1_results@testnet_keys.snap @@ -0,0 +1,7 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_results +--- +{ + Height(999999): [], +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@empty.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@empty.snap new file mode 100644 index 00000000000..16187cfe14e --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@empty.snap @@ -0,0 +1,5 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_and_birthday_heights +--- +{} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_0.snap new file mode 100644 index 00000000000..1a7b8aefebb --- /dev/null +++ 
b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_0.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(419199), + "zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_1.snap new file mode 100644 index 00000000000..1a7b8aefebb --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_1.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(419199), + "zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_2.snap new file mode 100644 index 00000000000..1a7b8aefebb --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_2.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(419199), + "zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_keys.snap new file mode 100644 index 00000000000..1a7b8aefebb --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@mainnet_keys.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(419199), + "zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_0.snap new file mode 100644 index 00000000000..4fcc5b8d921 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_0.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(279999), + 
"zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_1.snap new file mode 100644 index 00000000000..4fcc5b8d921 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_1.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(279999), + "zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_2.snap new file mode 100644 index 00000000000..4fcc5b8d921 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_2.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(279999), + "zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_keys.snap new file mode 100644 index 00000000000..4fcc5b8d921 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_keys@testnet_keys.snap @@ -0,0 +1,8 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: sapling_keys_last_heights +--- +{ + "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz": Height(279999), + "zxviewsfake": Height(999999), +} diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_0.snap new file mode 100644 index 00000000000..7cb2ced6139 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_0.snap @@ -0,0 +1,22 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000000000", + v: "c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb", + ), + KV( + k: 
"7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a06657f0000", + v: "", + ), + KV( + k: "7a78766965777366616b650000000000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_1.snap new file mode 100644 index 00000000000..e3560142e71 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_1.snap @@ -0,0 +1,30 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000000000", + v: "c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000010000", + v: "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a06657f0000", + v: "", + ), + KV( + k: "7a78766965777366616b650000000000", + v: "", + ), + KV( + k: "7a78766965777366616b650003e80000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_2.snap new file mode 100644 index 00000000000..a88179cbc01 --- /dev/null +++ 
b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_2.snap @@ -0,0 +1,34 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000000000", + v: "c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000010000", + v: "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000020000", + v: "8974d08d1c5f9c860d8b629d582a56659a4a1dcb2b5f98a25a5afcc2a784b0f4", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a06657f0000", + v: "", + ), + KV( + k: "7a78766965777366616b650000000000", + v: "", + ), + KV( + k: "7a78766965777366616b650003e80000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_keys.snap new file mode 100644 index 00000000000..34ac710bbf2 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@mainnet_keys.snap @@ -0,0 +1,14 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: 
"7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a06657f0000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_0.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_0.snap new file mode 100644 index 00000000000..d87f517e86c --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_0.snap @@ -0,0 +1,22 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000000000", + v: "c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0445bf0000", + v: "", + ), + KV( + k: "7a78766965777366616b650000000000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_1.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_1.snap new file mode 100644 index 00000000000..3f9edcab634 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_1.snap @@ -0,0 +1,30 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000000000", + v: 
"c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000010000", + v: "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0445bf0000", + v: "", + ), + KV( + k: "7a78766965777366616b650000000000", + v: "", + ), + KV( + k: "7a78766965777366616b650003e80000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_2.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_2.snap new file mode 100644 index 00000000000..7793467911f --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_2.snap @@ -0,0 +1,34 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000000000", + v: "c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000010000", + v: "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75", + ), + KV( + k: 
"7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0000020000", + v: "5822c0532da8a008259ac39933d3210e508c17e3ba21d2b2c428785efdccb3d5", + ), + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0445bf0000", + v: "", + ), + KV( + k: "7a78766965777366616b650000000000", + v: "", + ), + KV( + k: "7a78766965777366616b650003e80000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_keys.snap b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_keys.snap new file mode 100644 index 00000000000..19340f88225 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/snapshots/sapling_tx_ids_raw_data@testnet_keys.snap @@ -0,0 +1,14 @@ +--- +source: zebra-scan/src/storage/db/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "7a78766965777331713064757974676371717171707172653236776b6c343567767777776437303678773630386875636d7666616c72373539656a7766377173686a663572396161373332337a756c767a36706c68747470356d6c747163677339743033396378326430396d67713035747336336e387533356879763668396e633963747171747565327537636572326d716567756e75756c71326c7568713379776a637a333579796c6a657761346d676b676a7a79667768366672366a6430647a64343467686b306e78647632686e76346a356e7866777632347277646d676c6c68653070383536387367717439636b74303276326b786635616874716c3673306c746a706b636b77386774796d787478757539676372307377767a0445bf0000", + v: "", + ), + KV( + k: "7a78766965777366616b650f423f0000", + v: "", + ), +] diff --git a/zebra-scan/src/storage/db/tests/vectors.rs b/zebra-scan/src/storage/db/tests/vectors.rs new file mode 100644 index 00000000000..f02165d7111 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/vectors.rs @@ -0,0 +1,133 @@ +//! Fixed test vectors for the scanner Storage. 
+
+use zebra_chain::{block::Height, parameters::Network};
+use zebra_state::TransactionIndex;
+
+use crate::{
+    storage::db::tests::{fake_sapling_results, new_test_storage},
+    tests::ZECPAGES_SAPLING_VIEWING_KEY,
+};
+
+/// Tests that keys and their results are deleted correctly.
+#[test]
+pub fn deletes_keys_and_results_correctly() {
+    let mut db = new_test_storage(Network::Mainnet);
+
+    let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+
+    // Replace the last letter of the zec_pages efvk.
+    let fake_efvk = format!(
+        "{}t",
+        &ZECPAGES_SAPLING_VIEWING_KEY[..ZECPAGES_SAPLING_VIEWING_KEY.len() - 1]
+    );
+
+    let efvks = [&zec_pages_sapling_efvk, &fake_efvk];
+    let fake_heights = [Height::MIN, Height(1), Height::MAX];
+    let fake_transaction_indexes = [
+        TransactionIndex::MIN,
+        TransactionIndex::from_index(40),
+        TransactionIndex::MAX,
+    ];
+
+    for efvk in efvks {
+        for fake_result_height in fake_heights {
+            db.insert_sapling_results(
+                efvk,
+                fake_result_height,
+                fake_sapling_results(fake_transaction_indexes),
+            );
+        }
+    }
+
+    let expected_num_entries = fake_heights.len();
+    let expected_num_results_per_entry = fake_transaction_indexes.len();
+
+    for efvk in efvks {
+        assert_eq!(
+            db.sapling_results(efvk).len(),
+            expected_num_entries,
+            "there should be {expected_num_entries} entries for this key in the db"
+        );
+
+        for (_, result) in db.sapling_results(efvk) {
+            assert_eq!(
+                result.len(),
+                expected_num_results_per_entry,
+                "there should be {expected_num_results_per_entry} results for this entry in the db"
+            );
+        }
+
+        db.delete_sapling_keys(vec![efvk.clone()]);
+
+        assert!(
+            db.sapling_results(efvk).is_empty(),
+            "all results for this key should have been deleted"
+        );
+    }
+}
+
+/// Tests that results are cleared correctly, leaving a single empty entry per key.
+#[test]
+pub fn clears_results_correctly() {
+    let mut db = new_test_storage(Network::Mainnet);
+
+    let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string();
+
+    // Replace the last letter of the zec_pages efvk.
+    let fake_efvk = format!(
+        "{}t",
+        &ZECPAGES_SAPLING_VIEWING_KEY[..ZECPAGES_SAPLING_VIEWING_KEY.len() - 1]
+    );
+
+    let efvks = [&zec_pages_sapling_efvk, &fake_efvk];
+    let fake_heights = [Height::MIN, Height(1), Height::MAX];
+    let fake_transaction_indexes = [
+        TransactionIndex::MIN,
+        TransactionIndex::from_index(40),
+        TransactionIndex::MAX,
+    ];
+
+    for efvk in efvks {
+        for fake_result_height in fake_heights {
+            db.insert_sapling_results(
+                efvk,
+                fake_result_height,
+                fake_sapling_results(fake_transaction_indexes),
+            );
+        }
+    }
+
+    let expected_num_entries = fake_heights.len();
+    let expected_num_results_per_entry = fake_transaction_indexes.len();
+
+    for efvk in efvks {
+        assert_eq!(
+            db.sapling_results(efvk).len(),
+            expected_num_entries,
+            "there should be {expected_num_entries} entries for this key in the db"
+        );
+
+        for (_, result) in db.sapling_results(efvk) {
+            assert_eq!(
+                result.len(),
+                expected_num_results_per_entry,
+                "there should be {expected_num_results_per_entry} results for this entry in the db"
+            );
+        }
+
+        db.delete_sapling_results(vec![efvk.clone()]);
+
+        assert_eq!(
+            db.sapling_results(efvk).len(),
+            1,
+            "all results for this key should have been deleted, one empty entry should remain"
+        );
+
+        for (_, result) in db.sapling_results(efvk) {
+            assert!(
+                result.is_empty(),
+                "there should be no results for this entry in the db"
+            );
+        }
+    }
+}
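The two tests above pin down the contract difference between the deletion methods; in outline (a sketch reusing the tests' `db` and `efvk` bindings, not standalone code):

// delete_sapling_keys removes the key and all of its results:
db.delete_sapling_keys(vec![efvk.clone()]);
assert!(db.sapling_results(efvk).is_empty());

// delete_sapling_results keeps the key registered, replacing its results
// with a single empty marker entry at Height::MIN:
db.delete_sapling_results(vec![efvk.clone()]);
assert_eq!(db.sapling_results(efvk).len(), 1);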
diff --git a/zebra-scan/src/tests.rs b/zebra-scan/src/tests.rs
new file mode 100644
index 00000000000..b3a54c674d8
--- /dev/null
+++ b/zebra-scan/src/tests.rs
@@ -0,0 +1,365 @@
+//! Test that we can scan the Zebra blockchain using the external `zcash_client_backend` crate
+//! scanning functionality.
+//!
+//! These tests belong to the proof-of-concept stage of the external wallet support functionality.
+
+use std::sync::Arc;
+
+use chrono::{DateTime, Utc};
+
+use color_eyre::{Report, Result};
+use ff::{Field, PrimeField};
+use group::GroupEncoding;
+use rand::{rngs::OsRng, thread_rng, RngCore};
+
+use zcash_client_backend::{
+    encoding::encode_extended_full_viewing_key,
+    proto::compact_formats::{
+        ChainMetadata, CompactBlock, CompactSaplingOutput, CompactSaplingSpend, CompactTx,
+    },
+};
+use zcash_note_encryption::Domain;
+use zcash_primitives::{
+    block::BlockHash,
+    consensus::BlockHeight,
+    constants::SPENDING_KEY_GENERATOR,
+    memo::MemoBytes,
+    sapling::{
+        note_encryption::{sapling_note_encryption, SaplingDomain},
+        util::generate_random_rseed,
+        value::NoteValue,
+        Note, Nullifier,
+    },
+    zip32,
+};
+
+use zebra_chain::{
+    amount::{Amount, NegativeAllowed},
+    block::{self, merkle, Block, Header, Height},
+    fmt::HexDebug,
+    parameters::Network,
+    primitives::{redjubjub, Groth16Proof},
+    sapling::{self, PerSpendAnchor, Spend, TransferData},
+    serialization::AtLeastOne,
+    transaction::{LockTime, Transaction},
+    transparent::{CoinbaseData, Input},
+    work::{difficulty::CompactDifficulty, equihash::Solution},
+};
+use zebra_state::SaplingScanningKey;
+
+#[cfg(test)]
+mod vectors;
+
+/// The extended Sapling viewing key of [ZECpages](https://zecpages.com/boardinfo)
+pub const ZECPAGES_SAPLING_VIEWING_KEY: &str = "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz";
+
+/// A fake viewing key in an incorrect format.
+pub const FAKE_SAPLING_VIEWING_KEY: &str = "zxviewsfake";
+
+/// Generates `num_keys` [`SaplingScanningKey`]s for tests on the given [`Network`].
+///
+/// The keys are seeded only from their index in the returned `Vec`, so repeated calls return the
+/// same keys at a particular index.
+pub fn mock_sapling_scanning_keys(num_keys: u8, network: Network) -> Vec<SaplingScanningKey> {
+    let mut keys: Vec<SaplingScanningKey> = vec![];
+
+    for seed in 0..num_keys {
+        keys.push(encode_extended_full_viewing_key(
+            network.sapling_efvk_hrp(),
+            &mock_sapling_efvk(&[seed]),
+        ));
+    }
+
+    keys
+}
+
+/// Generates a [`zip32::sapling::ExtendedFullViewingKey`] from `seed` for tests.
+#[allow(deprecated)]
+pub fn mock_sapling_efvk(seed: &[u8]) -> zip32::sapling::ExtendedFullViewingKey {
+    // TODO: Use `to_diversifiable_full_viewing_key` since `to_extended_full_viewing_key` is
+    // deprecated.
+    zip32::sapling::ExtendedSpendingKey::master(seed).to_extended_full_viewing_key()
+}
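A usage sketch for the mock key helpers above; since each key is seeded only by its index, repeated calls are deterministic (sketch only, not part of the patch):

let keys = mock_sapling_scanning_keys(3, Network::Mainnet);
assert_eq!(keys.len(), 3);
// The same index always yields the same key.
assert_eq!(keys, mock_sapling_scanning_keys(3, Network::Mainnet));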
+/// Generates a fake block containing a Sapling output decryptable by `dfvk`.
+///
+/// The fake block has the following transactions in this order:
+/// 1. a transparent coinbase tx,
+/// 2. a V4 tx containing a random Sapling output,
+/// 3. a V4 tx containing a Sapling output decryptable by `dfvk`,
+/// 4. depending on the value of `tx_after`, another V4 tx containing a random Sapling output.
+pub fn fake_block(
+    height: BlockHeight,
+    nf: Nullifier,
+    dfvk: &zip32::sapling::DiversifiableFullViewingKey,
+    value: u64,
+    tx_after: bool,
+    initial_sapling_tree_size: Option<u32>,
+) -> (Block, u32) {
+    let header = Header {
+        version: 4,
+        previous_block_hash: block::Hash::default(),
+        merkle_root: merkle::Root::default(),
+        commitment_bytes: HexDebug::default(),
+        time: DateTime::<Utc>::default(),
+        difficulty_threshold: CompactDifficulty::default(),
+        nonce: HexDebug::default(),
+        solution: Solution::default(),
+    };
+
+    let block = fake_compact_block(
+        height,
+        BlockHash([0; 32]),
+        nf,
+        dfvk,
+        value,
+        tx_after,
+        initial_sapling_tree_size,
+    );
+
+    let mut transactions: Vec<Arc<Transaction>> = block
+        .vtx
+        .iter()
+        .map(|tx| compact_to_v4(tx).expect("A fake compact tx should be convertible to V4."))
+        .map(Arc::new)
+        .collect();
+
+    let coinbase_input = Input::Coinbase {
+        height: Height(1),
+        data: CoinbaseData::new(vec![]),
+        sequence: u32::MAX,
+    };
+
+    let coinbase = Transaction::V4 {
+        inputs: vec![coinbase_input],
+        outputs: vec![],
+        lock_time: LockTime::Height(Height(1)),
+        expiry_height: Height(1),
+        joinsplit_data: None,
+        sapling_shielded_data: None,
+    };
+
+    transactions.insert(0, Arc::new(coinbase));
+
+    let sapling_tree_size = block
+        .chain_metadata
+        .as_ref()
+        .unwrap()
+        .sapling_commitment_tree_size;
+
+    (
+        Block {
+            header: Arc::new(header),
+            transactions,
+        },
+        sapling_tree_size,
+    )
+}
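A usage sketch for `fake_block`, mirroring the scanner test vector later in this diff; the `4` assumes `tx_after` is `true`, giving a coinbase tx, a random tx, the decryptable tx, and one more random tx (`dfvk` is a `DiversifiableFullViewingKey` as in those tests):

// Sketch only, not part of the patch:
let (block, sapling_tree_size) = fake_block(1u32.into(), Nullifier([7; 32]), &dfvk, 1, true, Some(0));
assert_eq!(block.transactions.len(), 4);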
+/// Create a fake compact block with the provided fake account data.
+// This is a copy of the zcash_primitives `fake_compact_block`, where the `value` argument was
+// changed to a number for easier conversion:
+// https://github.com/zcash/librustzcash/blob/zcash_primitives-0.13.0/zcash_client_backend/src/scanning.rs#L635
+// We need a copy because this is a test-private function upstream.
+pub fn fake_compact_block(
+    height: BlockHeight,
+    prev_hash: BlockHash,
+    nf: Nullifier,
+    dfvk: &zip32::sapling::DiversifiableFullViewingKey,
+    value: u64,
+    tx_after: bool,
+    initial_sapling_tree_size: Option<u32>,
+) -> CompactBlock {
+    let to = dfvk.default_address().1;
+
+    // Create a fake Note for the account.
+    let mut rng = OsRng;
+    let rseed = generate_random_rseed(
+        &zcash_primitives::consensus::Network::TestNetwork,
+        height,
+        &mut rng,
+    );
+    let note = Note::from_parts(to, NoteValue::from_raw(value), rseed);
+    let encryptor = sapling_note_encryption::<_, zcash_primitives::consensus::Network>(
+        Some(dfvk.fvk().ovk),
+        note.clone(),
+        MemoBytes::empty(),
+        &mut rng,
+    );
+    let cmu = note.cmu().to_bytes().to_vec();
+    let ephemeral_key =
+        SaplingDomain::<zcash_primitives::consensus::Network>::epk_bytes(encryptor.epk())
+            .0
+            .to_vec();
+    let enc_ciphertext = encryptor.encrypt_note_plaintext();
+
+    // Create a fake CompactBlock containing the note.
+    let mut cb = CompactBlock {
+        hash: {
+            let mut hash = vec![0; 32];
+            rng.fill_bytes(&mut hash);
+            hash
+        },
+        prev_hash: prev_hash.0.to_vec(),
+        height: height.into(),
+        ..Default::default()
+    };
+
+    // Add a random Sapling tx before ours.
+    {
+        let mut tx = random_compact_tx(&mut rng);
+        tx.index = cb.vtx.len() as u64;
+        cb.vtx.push(tx);
+    }
+
+    let cspend = CompactSaplingSpend { nf: nf.0.to_vec() };
+    let cout = CompactSaplingOutput {
+        cmu,
+        ephemeral_key,
+        ciphertext: enc_ciphertext.as_ref()[..52].to_vec(),
+    };
+    let mut ctx = CompactTx::default();
+    let mut txid = vec![0; 32];
+    rng.fill_bytes(&mut txid);
+    ctx.hash = txid;
+    ctx.spends.push(cspend);
+    ctx.outputs.push(cout);
+    ctx.index = cb.vtx.len() as u64;
+    cb.vtx.push(ctx);
+
+    // Optionally add another random Sapling tx after ours.
+    if tx_after {
+        let mut tx = random_compact_tx(&mut rng);
+        tx.index = cb.vtx.len() as u64;
+        cb.vtx.push(tx);
+    }
+
+    cb.chain_metadata = initial_sapling_tree_size.map(|s| ChainMetadata {
+        sapling_commitment_tree_size: s + cb
+            .vtx
+            .iter()
+            .map(|tx| tx.outputs.len() as u32)
+            .sum::<u32>(),
+        ..Default::default()
+    });
+
+    cb
+}
+
+/// Create a random compact transaction.
+// This is an exact copy of `zcash_client_backend::scanning::random_compact_tx`:
+// https://github.com/zcash/librustzcash/blob/zcash_primitives-0.13.0/zcash_client_backend/src/scanning.rs#L597
+// We need a copy because this is a test-private function upstream.
+pub fn random_compact_tx(mut rng: impl RngCore) -> CompactTx {
+    let fake_nf = {
+        let mut nf = vec![0; 32];
+        rng.fill_bytes(&mut nf);
+        nf
+    };
+    let fake_cmu = {
+        let fake_cmu = bls12_381::Scalar::random(&mut rng);
+        fake_cmu.to_repr().as_ref().to_owned()
+    };
+    let fake_epk = {
+        let mut buffer = [0; 64];
+        rng.fill_bytes(&mut buffer);
+        let fake_esk = jubjub::Fr::from_bytes_wide(&buffer);
+        let fake_epk = SPENDING_KEY_GENERATOR * fake_esk;
+        fake_epk.to_bytes().to_vec()
+    };
+    let cspend = CompactSaplingSpend { nf: fake_nf };
+    let cout = CompactSaplingOutput {
+        cmu: fake_cmu,
+        ephemeral_key: fake_epk,
+        ciphertext: vec![0; 52],
+    };
+    let mut ctx = CompactTx::default();
+    let mut txid = vec![0; 32];
+    rng.fill_bytes(&mut txid);
+    ctx.hash = txid;
+    ctx.spends.push(cspend);
+    ctx.outputs.push(cout);
+    ctx
+}
+
+/// Converts [`CompactTx`] to [`Transaction::V4`].
+pub fn compact_to_v4(tx: &CompactTx) -> Result<Transaction> {
+    let sk = redjubjub::SigningKey::<SpendAuth>::new(thread_rng());
+    let vk = redjubjub::VerificationKey::from(&sk);
+    let dummy_rk = sapling::keys::ValidatingKey::try_from(vk)
+        .expect("Internally generated verification key should be convertible to a validating key.");
+
+    let spends = tx
+        .spends
+        .iter()
+        .map(|spend| {
+            Ok(Spend {
+                cv: sapling::NotSmallOrderValueCommitment::default(),
+                per_spend_anchor: sapling::tree::Root::default(),
+                nullifier: sapling::Nullifier::from(
+                    spend.nf().map_err(|_| Report::msg("Invalid nullifier."))?.0,
+                ),
+                rk: dummy_rk.clone(),
+                zkproof: Groth16Proof([0; 192]),
+                spend_auth_sig: redjubjub::Signature::<SpendAuth>::from([0; 64]),
+            })
+        })
+        .collect::<Result<Vec<Spend<PerSpendAnchor>>>>()?;
+
+    let spends = AtLeastOne::<Spend<PerSpendAnchor>>::try_from(spends)?;
+
+    let maybe_outputs = tx
+        .outputs
+        .iter()
+        .map(|output| {
+            let mut ciphertext = output.ciphertext.clone();
+            ciphertext.resize(580, 0);
+            let ciphertext: [u8; 580] = ciphertext
+                .try_into()
+                .map_err(|_| Report::msg("Could not convert ciphertext to `[u8; 580]`"))?;
+            let enc_ciphertext = sapling::EncryptedNote::from(ciphertext);
+
+            Ok(sapling::Output {
+                cv: sapling::NotSmallOrderValueCommitment::default(),
+                cm_u: Option::from(jubjub::Fq::from_bytes(
+                    &output
+                        .cmu()
+                        .map_err(|_| Report::msg("Invalid commitment."))?
+                        .to_bytes(),
+                ))
+                .ok_or(Report::msg("Invalid commitment."))?,
+                ephemeral_key: sapling::keys::EphemeralPublicKey::try_from(
+                    output
+                        .ephemeral_key()
+                        .map_err(|_| Report::msg("Invalid ephemeral key."))?
+                        .0,
+                )
+                .map_err(Report::msg)?,
+                enc_ciphertext,
+                out_ciphertext: sapling::WrappedNoteKey::from([0; 80]),
+                zkproof: Groth16Proof([0; 192]),
+            })
+        })
+        .collect::<Result<Vec<sapling::Output>>>()?;
+
+    let transfers = TransferData::SpendsAndMaybeOutputs {
+        shared_anchor: sapling::FieldNotPresent,
+        spends,
+        maybe_outputs,
+    };
+
+    let shielded_data = sapling::ShieldedData {
+        value_balance: Amount::<NegativeAllowed>::default(),
+        transfers,
+        binding_sig: redjubjub::Signature::<Binding>::from([0; 64]),
+    };
+
+    Ok(Transaction::V4 {
+        inputs: vec![],
+        outputs: vec![],
+        lock_time: LockTime::Height(Height(0)),
+        expiry_height: Height(0),
+        joinsplit_data: None,
+        sapling_shielded_data: Some(shielded_data),
+    })
+}
diff --git a/zebra-scan/src/tests/vectors.rs b/zebra-scan/src/tests/vectors.rs
new file mode 100644
index 00000000000..7af0d5dedb3
--- /dev/null
+++ b/zebra-scan/src/tests/vectors.rs
@@ -0,0 +1,202 @@
+//! Fixed integration test vectors for the scanner.
+
+use std::sync::Arc;
+
+use color_eyre::Result;
+
+use zcash_client_backend::{
+    encoding::{decode_extended_full_viewing_key, encode_extended_full_viewing_key},
+    proto::compact_formats::ChainMetadata,
+};
+use zcash_primitives::{
+    constants::mainnet::HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY,
+    sapling::Nullifier,
+    zip32::{DiversifiableFullViewingKey, ExtendedSpendingKey},
+};
+
+use zebra_chain::{
+    block::{Block, Height},
+    chain_tip::ChainTip,
+    parameters::Network,
+    serialization::ZcashDeserializeInto,
+};
+use zebra_state::{SaplingScannedResult, TransactionIndex};
+
+use crate::{
+    scan::{block_to_compact, scan_block},
+    storage::db::tests::new_test_storage,
+    tests::{fake_block, mock_sapling_efvk, ZECPAGES_SAPLING_VIEWING_KEY},
+};
+
+/// This test:
+/// - Creates a viewing key and a fake block containing a Sapling output decryptable by the key.
+/// - Scans the block.
+/// - Checks that the result contains the txid of the tx containing the Sapling output.
+#[tokio::test]
+async fn scanning_from_fake_generated_blocks() -> Result<()> {
+    let extsk = ExtendedSpendingKey::master(&[]);
+    let dfvk: DiversifiableFullViewingKey = extsk.to_diversifiable_full_viewing_key();
+    let nf = Nullifier([7; 32]);
+
+    let (block, sapling_tree_size) = fake_block(1u32.into(), nf, &dfvk, 1, true, Some(0));
+
+    assert_eq!(block.transactions.len(), 4);
+
+    let res = scan_block(Network::Mainnet, &block, sapling_tree_size, &[&dfvk]).unwrap();
+
+    // The response should have one transaction relevant to the key we provided.
+    assert_eq!(res.transactions().len(), 1);
+
+    // Check that the original block contains the txid in the scanning result.
+    assert!(block
+        .transactions
+        .iter()
+        .map(|tx| tx.hash().bytes_in_display_order())
+        .any(|txid| &txid == res.transactions()[0].txid.as_ref()));
+
+    // Check that the txid in the scanning result matches the third tx in the original block.
+    assert_eq!(
+        res.transactions()[0].txid.as_ref(),
+        &block.transactions[2].hash().bytes_in_display_order()
+    );
+
+    // The block hash of the response should be the same as the one provided.
+    assert_eq!(res.block_hash().0, block.hash().0);
+
+    Ok(())
+}
+
+/// Scan a populated state for the ZECpages viewing key.
+/// This test is very similar to `scanning_from_populated_zebra_state` but with the ZECpages key.
+/// There are no ZECpages transactions in the test data, so we should get empty related transactions.
+#[tokio::test]
+async fn scanning_zecpages_from_populated_zebra_state() -> Result<()> {
+    // Parse the key from ZECpages
+    let efvk = decode_extended_full_viewing_key(
+        HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY,
+        ZECPAGES_SAPLING_VIEWING_KEY,
+    )
+    .unwrap();
+
+    // Build a vector of viewing keys `vks` to scan for.
+    let fvk = efvk.fvk;
+    let ivk = fvk.vk.ivk();
+    let ivks = vec![ivk];
+
+    let network = Network::Mainnet;
+
+    // Create a continuous chain of mainnet blocks from genesis
+    let blocks: Vec<Arc<Block>> = zebra_test::vectors::CONTINUOUS_MAINNET_BLOCKS
+        .iter()
+        .map(|(_height, block_bytes)| block_bytes.zcash_deserialize_into().unwrap())
+        .collect();
+
+    // Create a populated state service.
+    let (_state_service, read_only_state_service, latest_chain_tip, _chain_tip_change) =
+        zebra_state::populated_state(blocks.clone(), network).await;
+
+    let db = read_only_state_service.db();
+
+    // use the tip as starting height
+    let mut height = latest_chain_tip.best_tip_height().unwrap();
+
+    let mut transactions_found = 0;
+    let mut transactions_scanned = 0;
+    let mut blocks_scanned = 0;
+    while let Some(block) = db.block(height.into()) {
+        // We use a dummy size of the Sapling note commitment tree. We can't set the size to zero
+        // because the underlying scanning function would return
+        // `zcash_client_backend::scanning::ScanError::TreeSizeUnknown`.
+ let sapling_commitment_tree_size = 1; + + let orchard_commitment_tree_size = 0; + + let chain_metadata = ChainMetadata { + sapling_commitment_tree_size, + orchard_commitment_tree_size, + }; + + let compact_block = block_to_compact(&block, chain_metadata); + + let res = scan_block(network, &block, sapling_commitment_tree_size, &ivks) + .expect("scanning block for the ZECpages viewing key should work"); + + transactions_found += res.transactions().len(); + transactions_scanned += compact_block.vtx.len(); + blocks_scanned += 1; + + // scan backwards + if height.is_min() { + break; + } + height = height.previous()?; + } + + // make sure all blocks and transactions were scanned + assert_eq!(blocks_scanned, 11); + assert_eq!(transactions_scanned, 11); + + // no relevant transactions should be found + assert_eq!(transactions_found, 0); + + Ok(()) +} + +/// Creates a viewing key and a fake block containing a Sapling output decryptable by the key, scans +/// the block using the key, and adds the results to the database. +/// +/// The purpose of this test is to check if our database and our scanning code are compatible. +#[test] +fn scanning_fake_blocks_store_key_and_results() -> Result<()> { + let network = Network::Mainnet; + + // Generate a key + let efvk = mock_sapling_efvk(&[]); + let dfvk = efvk.to_diversifiable_full_viewing_key(); + let key_to_be_stored = encode_extended_full_viewing_key("zxviews", &efvk); + + // Create a database + let mut storage = new_test_storage(network); + + // Insert the generated key to the database + storage.add_sapling_key(&key_to_be_stored, None); + + // Check key was added + assert_eq!(storage.sapling_keys_last_heights().len(), 1); + assert_eq!( + storage + .sapling_keys_last_heights() + .get(&key_to_be_stored) + .expect("height is stored") + .next() + .expect("height is not maximum"), + network.sapling_activation_height() + ); + + let nf = Nullifier([7; 32]); + + let (block, sapling_tree_size) = fake_block(1u32.into(), nf, &dfvk, 1, true, Some(0)); + + let result = scan_block(Network::Mainnet, &block, sapling_tree_size, &[&dfvk]).unwrap(); + + // The response should have one transaction relevant to the key we provided. 
+    assert_eq!(result.transactions().len(), 1);
+
+    let result =
+        SaplingScannedResult::from_bytes_in_display_order(*result.transactions()[0].txid.as_ref());
+
+    // Add result to database
+    storage.add_sapling_results(
+        &key_to_be_stored,
+        Height(1),
+        [(TransactionIndex::from_usize(0), result)].into(),
+    );
+
+    // Check the result was added
+    assert_eq!(
+        storage.sapling_results(&key_to_be_stored).get(&Height(1)),
+        Some(&vec![result])
+    );
+
+    Ok(())
+}
diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml
index e337ab6e641..924dfdc5f9f 100644
--- a/zebra-script/Cargo.toml
+++ b/zebra-script/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-script"
-version = "1.0.0-beta.30"
+version = "1.0.0-beta.35"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 description = "Zebra script verification wrapping zcashd's zcash_script library"
 license = "MIT OR Apache-2.0"
@@ -17,12 +17,12 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"]
 [dependencies]
 zcash_script = "0.1.14"
 
-zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" }
+zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35" }
 
-thiserror = "1.0.48"
+thiserror = "1.0.57"
 displaydoc = "0.2.4"
 
 [dev-dependencies]
 hex = "0.4.3"
 lazy_static = "1.4.0"
 
-zebra-test = { path = "../zebra-test" }
+zebra-test = { path = "../zebra-test", version = "1.0.0-beta.35" }
diff --git a/zebra-script/src/lib.rs b/zebra-script/src/lib.rs
index 24f55025bdf..43ea2fa7328 100644
--- a/zebra-script/src/lib.rs
+++ b/zebra-script/src/lib.rs
@@ -1,7 +1,7 @@
 //! Zebra script verification wrapping zcashd's zcash_script library
 #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")]
 #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")]
-#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_script")]
+#![doc(html_root_url = "https://docs.rs/zebra_script")]
 // We allow unsafe code, so we can call zcash_script
 #![allow(unsafe_code)]
@@ -221,7 +221,7 @@ impl CachedFfiTransaction {
         };
 
         if err == zcash_script_error_t_zcash_script_ERR_OK {
-            let ret = ret.try_into().expect("c_uint fits in a u64");
+            let ret = ret.into();
             Ok(ret)
         } else {
             Err(Error::from(err))
diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml
index ab5d9a9d331..2853fd77f1b 100644
--- a/zebra-state/Cargo.toml
+++ b/zebra-state/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-state"
-version = "1.0.0-beta.30"
+version = "1.0.0-beta.35"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 description = "State contextual verification and storage code for Zebra"
 license = "MIT OR Apache-2.0"
@@ -22,7 +22,7 @@ progress-bar = [
   "howudoin",
 ]
 
-# Experimental mining RPC support
+# Mining RPC support
 getblocktemplate-rpcs = [
   "zebra-chain/getblocktemplate-rpcs",
 ]
@@ -35,6 +35,9 @@ proptest-impl = [
   "zebra-chain/proptest-impl"
 ]
 
+# Experimental shielded blockchain scanning
+shielded-scan = []
+
 # Experimental elasticsearch support
 elasticsearch = [
   "dep:elasticsearch",
@@ -44,43 +47,43 @@ elasticsearch = [
 [dependencies]
 bincode = "1.3.3"
 
-chrono = { version = "0.4.31", default-features = false, features = ["clock", "std"] }
+chrono = { version = "0.4.34", default-features = false, features = ["clock", "std"] }
 dirs = "5.0.1"
-futures = "0.3.28"
+futures = "0.3.30"
 hex = "0.4.3"
 hex-literal = "0.4.1"
 humantime-serde = "1.1.1"
-indexmap = "2.0.1"
-itertools = "0.11.0"
+indexmap = "2.2.3"
+itertools = "0.12.1"
 lazy_static = "1.4.0"
-metrics = "0.21.1"
+metrics = "0.22.1"
 mset = "0.1.1"
-regex = "1.10.2"
+regex = "1.10.3"
rlimit = "0.10.1" -rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } -semver = "1.0.20" -serde = { version = "1.0.188", features = ["serde_derive"] } -tempfile = "3.8.0" -thiserror = "1.0.48" - -rayon = "1.7.0" -tokio = { version = "1.33.0", features = ["rt-multi-thread", "sync", "tracing"] } +rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] } +semver = "1.0.22" +serde = { version = "1.0.196", features = ["serde_derive"] } +tempfile = "3.10.0" +thiserror = "1.0.57" + +rayon = "1.8.1" +tokio = { version = "1.36.0", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.39" # elasticsearch specific dependencies. # Security: avoid default dependency on openssl elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.107", package = "serde_json", optional = true } +serde_json = { version = "1.0.113", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.30", optional = true } -proptest = { version = "1.3.1", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.35", optional = true } +proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.4.0", optional = true } [dev-dependencies] @@ -95,14 +98,14 @@ spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } insta = { version = "1.33.0", features = ["ron", "redactions"] } -proptest = "1.3.1" +proptest = "1.4.0" proptest-derive = "0.4.0" rand = "0.8.5" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } -zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } -zebra-test = { path = "../zebra-test/" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.35" } diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index fa9f85fa31f..775b41f4283 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -15,15 +15,12 @@ use tracing::Span; use zebra_chain::parameters::Network; use crate::{ - constants::{ - DATABASE_FORMAT_MINOR_VERSION, DATABASE_FORMAT_PATCH_VERSION, DATABASE_FORMAT_VERSION, - DATABASE_FORMAT_VERSION_FILE_NAME, - }, - BoxError, + constants::{DATABASE_FORMAT_VERSION_FILE_NAME, STATE_DATABASE_KIND}, + state_database_format_version_in_code, BoxError, }; /// Configuration for the state service. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields, default)] pub struct Config { /// The root directory for storing cached block data. 
@@ -128,34 +125,44 @@ fn gen_temp_path(prefix: &str) -> PathBuf {
 }
 
 impl Config {
-    /// Returns the path for the finalized state database
-    pub fn db_path(&self, network: Network) -> PathBuf {
+    /// Returns the path for the database, based on the kind, major version and network.
+    /// Each incompatible database format or network gets its own unique path.
+    pub fn db_path(
+        &self,
+        db_kind: impl AsRef<str>,
+        major_version: u64,
+        network: Network,
+    ) -> PathBuf {
+        let db_kind = db_kind.as_ref();
+        let major_version = format!("v{}", major_version);
         let net_dir = network.lowercase_name();
 
         if self.ephemeral {
-            gen_temp_path(&format!(
-                "zebra-state-v{}-{}-",
-                crate::constants::DATABASE_FORMAT_VERSION,
-                net_dir
-            ))
+            gen_temp_path(&format!("zebra-{db_kind}-{major_version}-{net_dir}-"))
         } else {
             self.cache_dir
-                .join("state")
-                .join(format!("v{}", crate::constants::DATABASE_FORMAT_VERSION))
+                .join(db_kind)
+                .join(major_version)
                 .join(net_dir)
         }
     }
 
-    /// Returns the path of the database format version file.
-    pub fn version_file_path(&self, network: Network) -> PathBuf {
-        let mut version_path = self.db_path(network);
+    /// Returns the path for the database format minor/patch version file,
+    /// based on the kind, major version and network.
+    pub fn version_file_path(
+        &self,
+        db_kind: impl AsRef<str>,
+        major_version: u64,
+        network: Network,
+    ) -> PathBuf {
+        let mut version_path = self.db_path(db_kind, major_version, network);
 
         version_path.push(DATABASE_FORMAT_VERSION_FILE_NAME);
 
         version_path
     }
 
-    /// Construct a config for an ephemeral database
+    /// Returns a config for a temporary database that is deleted when it is dropped.
     pub fn ephemeral() -> Config {
         Config {
             ephemeral: true,
@@ -189,21 +196,48 @@ impl Default for Config {
 // Cleaning up old database versions
 // TODO: put this in a different module?
 
+/// Spawns a task that checks if there are old state database folders,
+/// and deletes them from the filesystem.
+///
+/// See `check_and_delete_old_databases()` for details.
+pub fn check_and_delete_old_state_databases(config: &Config, network: Network) -> JoinHandle<()> {
+    check_and_delete_old_databases(
+        config,
+        STATE_DATABASE_KIND,
+        state_database_format_version_in_code().major,
+        network,
+    )
+}
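
For orientation, the layout `db_path` now produces is `<cache_dir>/<db_kind>/v<major_version>/<network>`. A minimal runnable sketch of that layout (the `scanner` kind and the example cache path are hypothetical; only the `state` kind and major version 25 appear in this diff):

```rust
use std::path::PathBuf;

// Sketch of the directory layout produced by the new `Config::db_path`:
// `<cache_dir>/<db_kind>/v<major_version>/<network>`.
fn db_path(cache_dir: &PathBuf, db_kind: &str, major_version: u64, net_dir: &str) -> PathBuf {
    cache_dir
        .join(db_kind)
        .join(format!("v{major_version}"))
        .join(net_dir)
}

fn main() {
    let cache = PathBuf::from("/home/user/.cache/zebra");
    // The state database keeps its existing location:
    assert_eq!(
        db_path(&cache, "state", 25, "mainnet"),
        PathBuf::from("/home/user/.cache/zebra/state/v25/mainnet")
    );
    // A hypothetical second database kind gets its own directory tree:
    assert_eq!(
        db_path(&cache, "scanner", 1, "mainnet"),
        PathBuf::from("/home/user/.cache/zebra/scanner/v1/mainnet")
    );
}
```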
+
 /// Spawns a task that checks if there are old database folders,
 /// and deletes them from the filesystem.
 ///
 /// Iterate over the files and directories in the databases folder and delete if:
-/// - The state directory exists.
-/// - The entry is a directory.
+/// - The `db_kind` directory exists.
+/// - The entry in `db_kind` is a directory.
 /// - The directory name has a prefix `v`.
 /// - The directory name without the prefix can be parsed as an unsigned number.
-/// - The parsed number is lower than the hardcoded `DATABASE_FORMAT_VERSION`.
-pub fn check_and_delete_old_databases(config: Config) -> JoinHandle<()> {
+/// - The parsed number is lower than the `major_version`.
+///
+/// The network is used to generate the path, then ignored.
+/// If `config` is an ephemeral database, no databases are deleted.
+///
+/// # Panics
+///
+/// If the path doesn't match the expected `db_kind/major_version/network` format.
+pub fn check_and_delete_old_databases(
+    config: &Config,
+    db_kind: impl AsRef<str>,
+    major_version: u64,
+    network: Network,
+) -> JoinHandle<()> {
     let current_span = Span::current();
+    let config = config.clone();
+    let db_kind = db_kind.as_ref().to_string();
 
     spawn_blocking(move || {
         current_span.in_scope(|| {
-            delete_old_databases(config);
+            delete_old_databases(config, db_kind, major_version, network);
             info!("finished old database version cleanup task");
         })
     })
@@ -212,20 +246,43 @@ pub fn check_and_delete_old_databases(config: Config) -> JoinHandle<()> {
 /// Check if there are old database folders and delete them from the filesystem.
 ///
 /// See [`check_and_delete_old_databases`] for details.
-fn delete_old_databases(config: Config) {
+fn delete_old_databases(config: Config, db_kind: String, major_version: u64, network: Network) {
     if config.ephemeral || !config.delete_old_database {
         return;
     }
 
-    info!("checking for old database versions");
+    info!(db_kind, "checking for old database versions");
 
-    let state_dir = config.cache_dir.join("state");
-    if let Some(state_dir) = read_dir(&state_dir) {
-        for entry in state_dir.flatten() {
-            let deleted_state = check_and_delete_database(&config, &entry);
+    let mut db_path = config.db_path(&db_kind, major_version, network);
+    // Check and remove the network path.
+    assert_eq!(
+        db_path.file_name(),
+        Some(network.lowercase_name().as_ref()),
+        "unexpected database network path structure"
+    );
+    assert!(db_path.pop());
+
+    // Check and remove the major version path, we'll iterate over them all below.
+    assert_eq!(
+        db_path.file_name(),
+        Some(format!("v{major_version}").as_ref()),
+        "unexpected database version path structure"
+    );
+    assert!(db_path.pop());
 
-            if let Some(deleted_state) = deleted_state {
-                info!(?deleted_state, "deleted outdated state directory");
+    // Check for the correct database kind to iterate within.
+    assert_eq!(
+        db_path.file_name(),
+        Some(db_kind.as_ref()),
+        "unexpected database kind path structure"
+    );
+
+    if let Some(db_kind_dir) = read_dir(&db_path) {
+        for entry in db_kind_dir.flatten() {
+            let deleted_db = check_and_delete_database(&config, major_version, &entry);
+
+            if let Some(deleted_db) = deleted_db {
+                info!(?deleted_db, "deleted outdated {db_kind} database directory");
             }
         }
     }
@@ -247,11 +304,15 @@ fn read_dir(dir: &Path) -> Option<ReadDir> {
 /// See [`check_and_delete_old_databases`] for details.
 ///
 /// If the directory was deleted, returns its path.
-fn check_and_delete_database(config: &Config, entry: &DirEntry) -> Option<PathBuf> {
+fn check_and_delete_database(
+    config: &Config,
+    major_version: u64,
+    entry: &DirEntry,
+) -> Option<PathBuf> {
     let dir_name = parse_dir_name(entry)?;
-    let version_number = parse_version_number(&dir_name)?;
+    let dir_major_version = parse_major_version(&dir_name)?;
 
-    if version_number >= crate::constants::DATABASE_FORMAT_VERSION {
+    if dir_major_version >= major_version {
         return None;
     }
@@ -296,10 +357,10 @@ fn parse_dir_name(entry: &DirEntry) -> Option<String> {
     None
 }
 
-/// Parse the state version number from `dir_name`.
+/// Parse the database major version number from `dir_name`.
 ///
 /// Returns `None` if parsing fails, or the directory name is not in the expected format.
-fn parse_version_number(dir_name: &str) -> Option<u64> {
+fn parse_major_version(dir_name: &str) -> Option<u64> {
     dir_name
         .strip_prefix('v')
         .and_then(|version| version.parse().ok())
@@ -307,23 +368,26 @@
 
 // TODO: move these to the format upgrade module
 
-/// Returns the full semantic version of the currently running database format code.
-///
-/// This is the version implemented by the Zebra code that's currently running,
-/// the minor and patch versions on disk can be different.
-pub fn database_format_version_in_code() -> Version {
-    Version::new(
-        DATABASE_FORMAT_VERSION,
-        DATABASE_FORMAT_MINOR_VERSION,
-        DATABASE_FORMAT_PATCH_VERSION,
+/// Returns the full semantic version of the on-disk state database, based on its config and network.
+pub fn state_database_format_version_on_disk(
+    config: &Config,
+    network: Network,
+) -> Result<Option<Version>, BoxError> {
+    database_format_version_on_disk(
+        config,
+        STATE_DATABASE_KIND,
+        state_database_format_version_in_code().major,
+        network,
     )
 }
 
-/// Returns the full semantic version of the on-disk database.
+/// Returns the full semantic version of the on-disk database, based on its config, kind, major version,
+/// and network.
 ///
 /// Typically, the version is read from a version text file.
 ///
-/// If there is an existing on-disk database, but no version file, returns `Ok(Some(major.0.0))`.
+/// If there is an existing on-disk database, but no version file,
+/// returns `Ok(Some(major_version.0.0))`.
 /// (This happens even if the database directory was just newly created.)
 ///
 /// If there is no existing on-disk database, returns `Ok(None)`.
@@ -332,12 +396,14 @@
 /// implemented by the running Zebra code can be different.
 pub fn database_format_version_on_disk(
     config: &Config,
+    db_kind: impl AsRef<str>,
+    major_version: u64,
     network: Network,
 ) -> Result<Option<Version>, BoxError> {
-    let version_path = config.version_file_path(network);
-    let db_path = config.db_path(network);
+    let version_path = config.version_file_path(&db_kind, major_version, network);
+    let db_path = config.db_path(db_kind, major_version, network);
 
-    database_format_version_at_path(&version_path, &db_path)
+    database_format_version_at_path(&version_path, &db_path, major_version)
 }
 
 /// Returns the full semantic version of the on-disk database at `version_path`.
 ///
@@ -346,6 +412,7 @@
 pub(crate) fn database_format_version_at_path(
     version_path: &Path,
     db_path: &Path,
+    major_version: u64,
 ) -> Result<Option<Version>, BoxError> {
     let disk_version_file = match fs::read_to_string(version_path) {
         Ok(version) => Some(version),
@@ -363,7 +430,7 @@
             .ok_or("invalid database format version file")?;
 
         return Ok(Some(Version::new(
-            DATABASE_FORMAT_VERSION,
+            major_version,
             minor.parse()?,
             patch.parse()?,
         )));
@@ -374,7 +441,7 @@
     match fs::metadata(db_path) {
         // But there is a database on disk, so it has the current major version with no upgrades.
         // If the database directory was just newly created, we also return this version.
-        Ok(_metadata) => Ok(Some(Version::new(DATABASE_FORMAT_VERSION, 0, 0))),
+        Ok(_metadata) => Ok(Some(Version::new(major_version, 0, 0))),
 
        // There's no version file and no database on disk, so it's a new database.
       // It will be created with the current version,
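
A small sketch of the version file convention used here: the file stores only `minor.patch`, and the major version is recovered from the `v<major>` directory name. The helper below is illustrative, not the exact parsing code (assumes the `semver` crate already used by this module):

```rust
use semver::Version;

// Sketch: a version file containing "3.0" inside `state/v25/mainnet/`
// is read back as semver 25.3.0 — major from the path, minor.patch from the file.
fn parse_version_file(major_version: u64, contents: &str) -> Option<Version> {
    let (minor, patch) = contents.trim().split_once('.')?;
    Some(Version::new(
        major_version,
        minor.parse().ok()?,
        patch.parse().ok()?,
    ))
}

fn main() {
    assert_eq!(
        parse_version_file(25, "3.0\n"),
        Some(Version::new(25, 3, 0))
    );
}
```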
@@ -385,44 +452,61 @@
     }
 }
 
-/// Writes `changed_version` to the on-disk database after the format is changed.
-/// (Or a new database is created.)
-///
-/// # Correctness
-///
-/// This should only be called:
-/// - after each format upgrade is complete,
-/// - when creating a new database, or
-/// - when an older Zebra version opens a newer database.
-///
-/// # Concurrency
-///
-/// This must only be called while RocksDB has an open database for `config`.
-/// Otherwise, multiple Zebra processes could write the version at the same time,
-/// corrupting the file.
-///
-/// # Panics
-///
-/// If the major versions do not match. (The format is incompatible.)
-pub fn write_database_format_version_to_disk(
-    changed_version: &Version,
-    config: &Config,
-    network: Network,
-) -> Result<(), BoxError> {
-    let version_path = config.version_file_path(network);
+// Hide this destructive method from the public API, except in tests.
+#[allow(unused_imports)]
+pub(crate) use hidden::{
+    write_database_format_version_to_disk, write_state_database_format_version_to_disk,
+};
 
-    // The major version is already in the directory path.
-    assert_eq!(
-        changed_version.major, DATABASE_FORMAT_VERSION,
-        "tried to do in-place database format change to an incompatible version"
-    );
+pub(crate) mod hidden {
+    #![allow(dead_code)]
 
-    let version = format!("{}.{}", changed_version.minor, changed_version.patch);
+    use super::*;
 
-    // # Concurrency
-    //
-    // The caller handles locking for this file write.
-    fs::write(version_path, version.as_bytes())?;
+    /// Writes `changed_version` to the on-disk state database after the format is changed.
+    /// (Or a new database is created.)
+    ///
+    /// See `write_database_format_version_to_disk()` for details.
+    pub fn write_state_database_format_version_to_disk(
+        config: &Config,
+        changed_version: &Version,
+        network: Network,
+    ) -> Result<(), BoxError> {
+        write_database_format_version_to_disk(config, STATE_DATABASE_KIND, changed_version, network)
+    }
 
-    Ok(())
+    /// Writes `changed_version` to the on-disk database after the format is changed.
+    /// (Or a new database is created.)
+    ///
+    /// The database path is based on its kind, `changed_version.major`, and network.
+    ///
+    /// # Correctness
+    ///
+    /// This should only be called:
+    /// - after each format upgrade is complete,
+    /// - when creating a new database, or
+    /// - when an older Zebra version opens a newer database.
+    ///
+    /// # Concurrency
+    ///
+    /// This must only be called while RocksDB has an open database for `config`.
+    /// Otherwise, multiple Zebra processes could write the version at the same time,
+    /// corrupting the file.
+    pub fn write_database_format_version_to_disk(
+        config: &Config,
+        db_kind: impl AsRef<str>,
+        changed_version: &Version,
+        network: Network,
+    ) -> Result<(), BoxError> {
+        let version_path = config.version_file_path(db_kind, changed_version.major, network);
+
+        let version = format!("{}.{}", changed_version.minor, changed_version.patch);
+
+        // # Concurrency
+        //
+        // The caller handles locking for this file write.
+ fs::write(version_path, version.as_bytes())?; + + Ok(()) + } } diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index af232e0dbfe..1cbe05e8342 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -6,7 +6,10 @@ use semver::Version; // For doc comment links #[allow(unused_imports)] -use crate::config::{self, Config}; +use crate::{ + config::{self, Config}, + constants, +}; pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY; @@ -27,6 +30,9 @@ pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY; // TODO: change to HeightDiff pub const MAX_BLOCK_REORG_HEIGHT: u32 = MIN_TRANSPARENT_COINBASE_MATURITY - 1; +/// The directory name used to distinguish the state database from Zebra's other databases or flat files. +pub const STATE_DATABASE_KIND: &str = "state"; + /// The database format major version, incremented each time the on-disk database format has a /// breaking data format change. /// @@ -38,9 +44,9 @@ pub const MAX_BLOCK_REORG_HEIGHT: u32 = MIN_TRANSPARENT_COINBASE_MATURITY - 1; /// - we previously added compatibility code, and /// - it's available in all supported Zebra versions. /// -/// Use [`config::database_format_version_in_code()`] or -/// [`config::database_format_version_on_disk()`] to get the full semantic format version. -pub(crate) const DATABASE_FORMAT_VERSION: u64 = 25; +/// Instead of using this constant directly, use [`constants::state_database_format_version_in_code()`] +/// or [`config::database_format_version_on_disk()`] to get the full semantic format version. +const DATABASE_FORMAT_VERSION: u64 = 25; /// The database format minor version, incremented each time the on-disk database format has a /// significant data format change. @@ -49,11 +55,23 @@ pub(crate) const DATABASE_FORMAT_VERSION: u64 = 25; /// - adding new column families, /// - changing the format of a column family in a compatible way, or /// - breaking changes with compatibility code in all supported Zebra versions. -pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 3; +const DATABASE_FORMAT_MINOR_VERSION: u64 = 3; /// The database format patch version, incremented each time the on-disk database format has a /// significant format compatibility fix. -pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 0; +const DATABASE_FORMAT_PATCH_VERSION: u64 = 0; + +/// Returns the full semantic version of the currently running state database format code. +/// +/// This is the version implemented by the Zebra code that's currently running, +/// the minor and patch versions on disk can be different. +pub fn state_database_format_version_in_code() -> Version { + Version::new( + DATABASE_FORMAT_VERSION, + DATABASE_FORMAT_MINOR_VERSION, + DATABASE_FORMAT_PATCH_VERSION, + ) +} /// Returns the highest database version that modifies the subtree index format. /// diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index ad2cec55207..59088a931bd 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -1,4 +1,4 @@ -//! State contextual verification and storage code for Zebra. 🦓 +//! State contextual verification and storage code for Zebra. //! //! # Correctness //! @@ -10,27 +10,20 @@ #![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_state")] -// -// Rust 1.72 has a false positive when nested generics are used inside Arc. 
-// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code. -// -// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release: -// -#![cfg_attr( - any(test, feature = "proptest-impl"), - allow(clippy::arc_with_non_send_sync) -)] +#![doc(html_root_url = "https://docs.rs/zebra_state")] #[macro_use] extern crate tracing; +// TODO: only export the Config struct and a few other important methods +pub mod config; +// Most constants are exported by default pub mod constants; +// Allow use in external tests #[cfg(any(test, feature = "proptest-impl"))] pub mod arbitrary; -mod config; mod error; mod request; mod response; @@ -40,10 +33,10 @@ mod service; mod tests; pub use config::{ - check_and_delete_old_databases, database_format_version_in_code, - database_format_version_on_disk, Config, + check_and_delete_old_databases, check_and_delete_old_state_databases, + database_format_version_on_disk, state_database_format_version_on_disk, Config, }; -pub use constants::MAX_BLOCK_REORG_HEIGHT; +pub use constants::{state_database_format_version_in_code, MAX_BLOCK_REORG_HEIGHT}; pub use error::{ BoxError, CloneError, CommitSemanticallyVerifiedError, DuplicateNullifierError, ValidateContextError, @@ -56,24 +49,51 @@ pub use service::{ chain_tip::{ChainTipChange, LatestChainTip, TipAction}, check, init, spawn_init, watch_receiver::WatchReceiver, - OutputIndex, OutputLocation, TransactionLocation, + OutputIndex, OutputLocation, TransactionIndex, TransactionLocation, +}; + +// Allow use in the scanner +#[cfg(feature = "shielded-scan")] +pub use service::finalized_state::{ + SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult, + SaplingScanningKey, +}; + +// Allow use in the scanner and external tests +#[cfg(any(test, feature = "proptest-impl", feature = "shielded-scan"))] +pub use service::{ + finalized_state::{ + DiskWriteBatch, FromDisk, IntoDisk, ReadDisk, TypedColumnFamily, WriteDisk, + WriteTypedBatch, ZebraDb, + }, + ReadStateService, }; #[cfg(feature = "getblocktemplate-rpcs")] pub use response::GetBlockTemplateChainInfo; +// Allow use in external tests #[cfg(any(test, feature = "proptest-impl"))] pub use service::{ arbitrary::{populated_state, CHAIN_TIP_UPDATE_WAIT_LIMIT}, chain_tip::{ChainTipBlock, ChainTipSender}, - finalized_state::{DiskWriteBatch, MAX_ON_DISK_HEIGHT}, - init_test, init_test_services, ReadStateService, + finalized_state::{RawBytes, KV, MAX_ON_DISK_HEIGHT}, + init_test, init_test_services, }; #[cfg(any(test, feature = "proptest-impl"))] -pub use config::write_database_format_version_to_disk; +pub use constants::latest_version_for_adding_subtrees; #[cfg(any(test, feature = "proptest-impl"))] -pub use constants::latest_version_for_adding_subtrees; +pub use config::hidden::{ + write_database_format_version_to_disk, write_state_database_format_version_to_disk, +}; + +// Allow use only inside the crate in production +#[cfg(not(any(test, feature = "proptest-impl")))] +#[allow(unused_imports)] +pub(crate) use config::hidden::{ + write_database_format_version_to_disk, write_state_database_format_version_to_disk, +}; pub(crate) use request::ContextuallyVerifiedBlock; diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index f2513119e2d..2a6b3b1a5b8 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -224,6 +224,9 @@ pub struct ContextuallyVerifiedBlock { } /// Wraps note commitment trees and the history tree together. 
+///
+/// The default instance represents the treestate that corresponds to the genesis block.
+#[derive(Clone, Debug, Default, Eq, PartialEq)]
 pub struct Treestate {
     /// Note commitment trees.
     pub note_commitment_trees: NoteCommitmentTrees,
@@ -253,20 +256,6 @@ impl Treestate {
     }
 }
 
-/// Contains a block ready to be committed together with its associated
-/// treestate.
-///
-/// Zebra's non-finalized state passes this `struct` over to the finalized state
-/// when committing a block. The associated treestate is passed so that the
-/// finalized state does not have to retrieve the previous treestate from the
-/// database and recompute a new one.
-pub struct SemanticallyVerifiedBlockWithTrees {
-    /// A block ready to be committed.
-    pub verified: SemanticallyVerifiedBlock,
-    /// The tresstate associated with the block.
-    pub treestate: Treestate,
-}
-
 /// Contains a block ready to be committed.
 ///
 /// Zebra's state service passes this `enum` over to the finalized state
@@ -281,6 +270,54 @@ pub enum FinalizableBlock {
     },
 }
 
+/// Contains a block with all its associated data that the finalized state can commit to its
+/// database.
+///
+/// Note that it's the constructor's responsibility to ensure that all data is valid and verified.
+pub struct FinalizedBlock {
+    /// The block to commit to the state.
+    pub(super) block: Arc<Block>,
+    /// The hash of the block.
+    pub(super) hash: block::Hash,
+    /// The height of the block.
+    pub(super) height: block::Height,
+    /// New transparent outputs created in this block, indexed by
+    /// [`OutPoint`](transparent::OutPoint).
+    pub(super) new_outputs: HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
+    /// A precomputed list of the hashes of the transactions in this block, in the same order as
+    /// `block.transactions`.
+    pub(super) transaction_hashes: Arc<[transaction::Hash]>,
+    /// The treestate associated with the block.
+    pub(super) treestate: Treestate,
+}
+
+impl FinalizedBlock {
+    /// Constructs [`FinalizedBlock`] from [`CheckpointVerifiedBlock`] and its [`Treestate`].
+    pub fn from_checkpoint_verified(block: CheckpointVerifiedBlock, treestate: Treestate) -> Self {
+        Self::from_semantically_verified(SemanticallyVerifiedBlock::from(block), treestate)
+    }
+
+    /// Constructs [`FinalizedBlock`] from [`ContextuallyVerifiedBlock`] and its [`Treestate`].
+    pub fn from_contextually_verified(
+        block: ContextuallyVerifiedBlock,
+        treestate: Treestate,
+    ) -> Self {
+        Self::from_semantically_verified(SemanticallyVerifiedBlock::from(block), treestate)
+    }
+
+    /// Constructs [`FinalizedBlock`] from [`SemanticallyVerifiedBlock`] and its [`Treestate`].
+    fn from_semantically_verified(block: SemanticallyVerifiedBlock, treestate: Treestate) -> Self {
+        Self {
+            block: block.block,
+            hash: block.hash,
+            height: block.height,
+            new_outputs: block.new_outputs,
+            transaction_hashes: block.transaction_hashes,
+            treestate,
+        }
+    }
+}
+
 impl FinalizableBlock {
     /// Create a new [`FinalizableBlock`] given a [`ContextuallyVerifiedBlock`].
pub fn new(contextually_verified: ContextuallyVerifiedBlock, treestate: Treestate) -> Self { @@ -705,10 +742,10 @@ impl Request { pub fn count_metric(&self) { metrics::counter!( "state.requests", - 1, "service" => "state", "type" => self.variant_name() - ); + ) + .increment(1); } } @@ -994,10 +1031,10 @@ impl ReadRequest { pub fn count_metric(&self) { metrics::counter!( "state.requests", - 1, "service" => "read_state", "type" => self.variant_name() - ); + ) + .increment(1); } } diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index dce5e17e9ba..64c1936475f 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -85,7 +85,7 @@ pub mod arbitrary; #[cfg(test)] mod tests; -pub use finalized_state::{OutputIndex, OutputLocation, TransactionLocation}; +pub use finalized_state::{OutputIndex, OutputLocation, TransactionIndex, TransactionLocation}; use self::queued_blocks::{QueuedCheckpointVerified, QueuedSemanticallyVerified, SentHashes}; @@ -520,14 +520,9 @@ impl StateService { self.max_finalized_queue_height = queued_height.0 as f64; } - metrics::gauge!( - "state.checkpoint.queued.max.height", - self.max_finalized_queue_height, - ); - metrics::gauge!( - "state.checkpoint.queued.block.count", - self.finalized_state_queued_blocks.len() as f64, - ); + metrics::gauge!("state.checkpoint.queued.max.height").set(self.max_finalized_queue_height); + metrics::gauge!("state.checkpoint.queued.block.count") + .set(self.finalized_state_queued_blocks.len() as f64); rsp_rx } @@ -583,10 +578,8 @@ impl StateService { "block commit task exited. Is Zebra shutting down?", ); } else { - metrics::gauge!( - "state.checkpoint.sent.block.height", - last_sent_finalized_block_height.0 as f64, - ); + metrics::gauge!("state.checkpoint.sent.block.height") + .set(last_sent_finalized_block_height.0 as f64); }; } } diff --git a/zebra-state/src/service/chain_tip.rs b/zebra-state/src/service/chain_tip.rs index 76f57bfabe7..04d2af863f9 100644 --- a/zebra-state/src/service/chain_tip.rs +++ b/zebra-state/src/service/chain_tip.rs @@ -628,6 +628,11 @@ impl ChainTipChange { } } } + + /// Returns the inner `LatestChainTip`. 
+ pub fn latest_chain_tip(&self) -> LatestChainTip { + self.latest_chain_tip.clone() + } } impl Clone for ChainTipChange { diff --git a/zebra-state/src/service/check.rs b/zebra-state/src/service/check.rs index bd8dd8b8648..c89f63956e0 100644 --- a/zebra-state/src/service/check.rs +++ b/zebra-state/src/service/check.rs @@ -72,7 +72,7 @@ where .collect(); let parent_block = relevant_chain - .get(0) + .first() .expect("state must contain parent block to do contextual validation"); let parent_block = parent_block.borrow(); let parent_height = parent_block diff --git a/zebra-state/src/service/check/utxo.rs b/zebra-state/src/service/check/utxo.rs index 186f89d83af..324efa3c035 100644 --- a/zebra-state/src/service/check/utxo.rs +++ b/zebra-state/src/service/check/utxo.rs @@ -196,8 +196,7 @@ pub fn transparent_coinbase_spend( match spend_restriction { OnlyShieldedOutputs { spend_height } => { - let min_spend_height = - utxo.height + MIN_TRANSPARENT_COINBASE_MATURITY.try_into().unwrap(); + let min_spend_height = utxo.height + MIN_TRANSPARENT_COINBASE_MATURITY.into(); let min_spend_height = min_spend_height.expect("valid UTXOs have coinbase heights far below Height::MAX"); if spend_height >= min_spend_height { diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index fde5c414c28..bdcb8db6ebf 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -2,9 +2,8 @@ //! //! Zebra's database is implemented in 4 layers: //! - [`FinalizedState`]: queues, validates, and commits blocks, using... -//! - [`ZebraDb`]: reads and writes [`zebra_chain`] types to the database, using... -//! - [`DiskDb`](disk_db::DiskDb): reads and writes format-specific types -//! to the database, using... +//! - [`ZebraDb`]: reads and writes [`zebra_chain`] types to the state database, using... +//! - [`DiskDb`]: reads and writes generic types to any column family in the database, using... //! - [`disk_format`]: converts types to raw database bytes. //! //! These layers allow us to split [`zebra_chain`] types for efficient database storage. @@ -12,8 +11,8 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must -//! be incremented each time the database format (column, serialization, etc) changes. +//! [`crate::constants::state_database_format_version_in_code()`] must be incremented +//! each time the database format (column, serialization, etc) changes. 
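
As a sketch of the lowest layer named in the module docs above: `disk_format` converts typed values to raw bytes and back before they reach RocksDB. The trait shapes below follow the `IntoDisk`/`FromDisk` names re-exported later in this diff, but the 4-byte big-endian `Height` encoding is illustrative rather than Zebra's exact on-disk format:

```rust
// Minimal sketch of the disk_format layer's contract: types round-trip
// through raw bytes. The byte layout here is illustrative only.
trait IntoDisk {
    type Bytes: AsRef<[u8]>;
    fn as_bytes(&self) -> Self::Bytes;
}

trait FromDisk: Sized {
    fn from_bytes(bytes: impl AsRef<[u8]>) -> Self;
}

#[derive(Debug, PartialEq)]
struct Height(u32);

impl IntoDisk for Height {
    type Bytes = [u8; 4];
    fn as_bytes(&self) -> Self::Bytes {
        // Big-endian keys keep RocksDB's lexicographic order equal to numeric order.
        self.0.to_be_bytes()
    }
}

impl FromDisk for Height {
    fn from_bytes(bytes: impl AsRef<[u8]>) -> Self {
        let array = bytes.as_ref().try_into().expect("height is 4 bytes");
        Height(u32::from_be_bytes(array))
    }
}

fn main() {
    let height = Height(2_000_000);
    assert_eq!(Height::from_bytes(height.as_bytes()), height);
}
```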
use std::{ io::{stderr, stdout, Write}, @@ -23,11 +22,14 @@ use std::{ use zebra_chain::{block, parallel::tree::NoteCommitmentTrees, parameters::Network}; use crate::{ - request::{FinalizableBlock, SemanticallyVerifiedBlockWithTrees, Treestate}, + constants::{state_database_format_version_in_code, STATE_DATABASE_KIND}, + request::{FinalizableBlock, FinalizedBlock, Treestate}, service::{check, QueuedCheckpointVerified}, BoxError, CheckpointVerifiedBlock, CloneError, Config, }; +pub mod column_family; + mod disk_db; mod disk_format; mod zebra_db; @@ -38,14 +40,61 @@ mod arbitrary; #[cfg(test)] mod tests; -pub use disk_format::{OutputIndex, OutputLocation, TransactionLocation, MAX_ON_DISK_HEIGHT}; +#[allow(unused_imports)] +pub use column_family::{TypedColumnFamily, WriteTypedBatch}; +#[allow(unused_imports)] +pub use disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}; +#[allow(unused_imports)] +pub use disk_format::{ + FromDisk, IntoDisk, OutputIndex, OutputLocation, RawBytes, TransactionIndex, + TransactionLocation, MAX_ON_DISK_HEIGHT, +}; +pub use zebra_db::ZebraDb; -pub(super) use zebra_db::ZebraDb; +#[cfg(feature = "shielded-scan")] +pub use disk_format::{ + SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult, + SaplingScanningKey, +}; #[cfg(any(test, feature = "proptest-impl"))] -pub use disk_db::DiskWriteBatch; -#[cfg(not(any(test, feature = "proptest-impl")))] -use disk_db::DiskWriteBatch; +pub use disk_format::KV; + +/// The column families supported by the running `zebra-state` database code. +/// +/// Existing column families that aren't listed here are preserved when the database is opened. +pub const STATE_COLUMN_FAMILIES_IN_CODE: &[&str] = &[ + // Blocks + "hash_by_height", + "height_by_hash", + "block_header_by_height", + // Transactions + "tx_by_loc", + "hash_by_tx_loc", + "tx_loc_by_hash", + // Transparent + "balance_by_transparent_addr", + "tx_loc_by_transparent_addr_loc", + "utxo_by_out_loc", + "utxo_loc_by_transparent_addr_loc", + // Sprout + "sprout_nullifiers", + "sprout_anchors", + "sprout_note_commitment_tree", + // Sapling + "sapling_nullifiers", + "sapling_anchors", + "sapling_note_commitment_tree", + "sapling_note_commitment_subtree", + // Orchard + "orchard_nullifiers", + "orchard_anchors", + "orchard_note_commitment_tree", + "orchard_note_commitment_subtree", + // Chain + "history_tree", + "tip_chain_value_pool", +]; /// The finalized part of the chain state, stored in the db. /// @@ -62,9 +111,6 @@ pub struct FinalizedState { // This configuration cannot be modified after the database is initialized, // because some clones would have different values. // - /// The configured network. - network: Network, - /// The configured stop height. /// /// Commit blocks to the finalized state up to this height, then exit Zebra. 
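
The `STATE_COLUMN_FAMILIES_IN_CODE` doc above says unlisted on-disk column families are preserved. One way to get that behavior from the `rocksdb` crate is to open with the union of the code's list and the on-disk list; a simplified sketch, not the actual `DiskDb` open code:

```rust
use rocksdb::{Options, DB};

/// Opens `path`, creating any missing column families from `cfs_in_code`,
/// while preserving column families that exist on disk but are unknown to
/// the running code. Sketch only: error handling is simplified.
fn open_preserving_unknown_cfs(path: &str, cfs_in_code: &[&str]) -> Result<DB, rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.create_missing_column_families(true);

    // Column families already on disk (empty for a brand-new database).
    let on_disk = DB::list_cf(&opts, path).unwrap_or_default();

    // Union of the code's list and the on-disk list, so nothing is dropped.
    let mut all_cfs: Vec<String> = cfs_in_code.iter().map(ToString::to_string).collect();
    for cf in on_disk {
        if !all_cfs.contains(&cf) {
            all_cfs.push(cf);
        }
    }

    DB::open_cf(&opts, path, all_cfs)
}
```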
@@ -104,6 +150,7 @@ impl FinalizedState {
             false,
             #[cfg(feature = "elasticsearch")]
             elastic_db,
+            false,
         )
     }
 
@@ -116,12 +163,22 @@
         network: Network,
         debug_skip_format_upgrades: bool,
         #[cfg(feature = "elasticsearch")] elastic_db: Option<elasticsearch::Elasticsearch>,
+        read_only: bool,
     ) -> Self {
-        let db = ZebraDb::new(config, network, debug_skip_format_upgrades);
+        let db = ZebraDb::new(
+            config,
+            STATE_DATABASE_KIND,
+            &state_database_format_version_in_code(),
+            network,
+            debug_skip_format_upgrades,
+            STATE_COLUMN_FAMILIES_IN_CODE
+                .iter()
+                .map(ToString::to_string),
+            read_only,
+        );
 
         #[cfg(feature = "elasticsearch")]
         let new_state = Self {
-            network,
             debug_stop_at_height: config.debug_stop_at_height.map(block::Height),
             db,
             elastic_db,
@@ -130,7 +187,6 @@
         #[cfg(not(feature = "elasticsearch"))]
         let new_state = Self {
-            network,
             debug_stop_at_height: config.debug_stop_at_height.map(block::Height),
             db,
         };
@@ -179,7 +235,7 @@
 
     /// Returns the configured network for this database.
     pub fn network(&self) -> Network {
-        self.network
+        self.db.network()
    }
 
     /// Commit a checkpoint-verified block to the state.
@@ -199,26 +255,20 @@
         );
 
         if result.is_ok() {
-            metrics::counter!("state.checkpoint.finalized.block.count", 1);
-            metrics::gauge!(
-                "state.checkpoint.finalized.block.height",
-                checkpoint_verified.height.0 as f64,
-            );
+            metrics::counter!("state.checkpoint.finalized.block.count").increment(1);
+            metrics::gauge!("state.checkpoint.finalized.block.height")
+                .set(checkpoint_verified.height.0 as f64);
 
             // This height gauge is updated for both fully verified and checkpoint blocks.
             // These updates can't conflict, because the state makes sure that blocks
             // are committed in order.
-            metrics::gauge!(
-                "zcash.chain.verified.block.height",
-                checkpoint_verified.height.0 as f64,
-            );
-            metrics::counter!("zcash.chain.verified.block.total", 1);
+            metrics::gauge!("zcash.chain.verified.block.height")
+                .set(checkpoint_verified.height.0 as f64);
+            metrics::counter!("zcash.chain.verified.block.total").increment(1);
         } else {
-            metrics::counter!("state.checkpoint.error.block.count", 1);
-            metrics::gauge!(
-                "state.checkpoint.error.block.height",
-                checkpoint_verified.height.0 as f64,
-            );
+            metrics::counter!("state.checkpoint.error.block.count").increment(1);
+            metrics::gauge!("state.checkpoint.error.block.height")
+                .set(checkpoint_verified.height.0 as f64);
        };
 
         // Make the error cloneable, so we can send it to the block verify future,
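
For reviewers unfamiliar with the `metrics` 0.22 migration in the hunk above: the macros now return a handle, and the value is applied with a method call. A short sketch using one of the metric names from this diff (recording anything still requires an installed recorder, such as a Prometheus exporter):

```rust
// metrics 0.21 style (removed in this diff):
//     metrics::counter!("state.checkpoint.finalized.block.count", 1);
//     metrics::gauge!("state.checkpoint.finalized.block.height", height as f64);
//
// metrics 0.22 style (added in this diff): macros return a handle.
fn record_example(height: u32) {
    metrics::counter!("state.checkpoint.finalized.block.count").increment(1);
    metrics::gauge!("state.checkpoint.finalized.block.height").set(height as f64);
}
```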
@@ -290,7 +340,7 @@
         // thread, if it shows up in profiles
         check::block_commitment_is_valid_for_chain_history(
             block.clone(),
-            self.network,
+            self.network(),
             &history_tree,
         )?;
 
@@ -302,17 +352,15 @@
         let sapling_root = note_commitment_trees.sapling.root();
         let orchard_root = note_commitment_trees.orchard.root();
         history_tree_mut.push(self.network(), block.clone(), sapling_root, orchard_root)?;
+        let treestate = Treestate {
+            note_commitment_trees,
+            history_tree,
+        };
 
         (
             checkpoint_verified.height,
             checkpoint_verified.hash,
-            SemanticallyVerifiedBlockWithTrees {
-                verified: checkpoint_verified.0,
-                treestate: Treestate {
-                    note_commitment_trees,
-                    history_tree,
-                },
-            },
+            FinalizedBlock::from_checkpoint_verified(checkpoint_verified, treestate),
             Some(prev_note_commitment_trees),
         )
     }
@@ -322,10 +370,7 @@
     } => (
         contextually_verified.height,
         contextually_verified.hash,
-        SemanticallyVerifiedBlockWithTrees {
-            verified: contextually_verified.into(),
-            treestate,
-        },
+        FinalizedBlock::from_contextually_verified(contextually_verified, treestate),
         prev_note_commitment_trees,
     ),
 };
@@ -336,7 +381,7 @@
 // Assert that callers (including unit tests) get the chain order correct
 if self.db.is_empty() {
     assert_eq!(
-        committed_tip_hash, finalized.verified.block.header.previous_block_hash,
+        committed_tip_hash, finalized.block.header.previous_block_hash,
         "the first block added to an empty state must be a genesis block, source: {source}",
     );
     assert_eq!(
@@ -352,23 +397,26 @@
     );
 
     assert_eq!(
-        committed_tip_hash, finalized.verified.block.header.previous_block_hash,
+        committed_tip_hash, finalized.block.header.previous_block_hash,
         "committed block must be a child of the finalized tip, source: {source}",
     );
 }
 
 #[cfg(feature = "elasticsearch")]
-let finalized_block = finalized.verified.block.clone();
+let finalized_inner_block = finalized.block.clone();
 let note_commitment_trees = finalized.treestate.note_commitment_trees.clone();
 
-let result =
-    self.db
-        .write_block(finalized, prev_note_commitment_trees, self.network, source);
+let result = self.db.write_block(
+    finalized,
+    prev_note_commitment_trees,
+    self.network(),
+    source,
+);
 
 if result.is_ok() {
     // Save blocks to elasticsearch if the feature is enabled.
     #[cfg(feature = "elasticsearch")]
-    self.elasticsearch(&finalized_block);
+    self.elasticsearch(&finalized_inner_block);
 
     // TODO: move the stop height check to the syncer (#3442)
     if self.is_at_stop_height(height) {
@@ -404,15 +452,11 @@
     let block_time = block.header.time.timestamp();
     let local_time = chrono::Utc::now().timestamp();
 
-    // Mainnet bulk size is small enough to avoid the elasticsearch 100mb content
-    // length limitation. MAX_BLOCK_BYTES = 2MB but each block use around 4.1 MB of JSON.
+    // Bulk size is small enough to avoid the elasticsearch 100mb content length limitation.
+    // MAX_BLOCK_BYTES = 2MB but each block uses around 4.1 MB of JSON.
     // Each block count as 2 as we send them with a operation/header line. A value of 48
     // is 24 blocks.
-    const MAINNET_AWAY_FROM_TIP_BULK_SIZE: usize = 48;
-
-    // Testnet bulk size is larger as blocks are generally smaller in the testnet.
-    // A value of 800 is 400 blocks as we are not counting the operation line.
-    const TESTNET_AWAY_FROM_TIP_BULK_SIZE: usize = 800;
+    const AWAY_FROM_TIP_BULK_SIZE: usize = 48;
 
     // The number of blocks the bulk will have when we are in sync.
     // A value of 2 means only 1 block as we want to insert them as soon as we get
@@ -423,10 +467,7 @@
     // less than this number of seconds.
     const CLOSE_TO_TIP_SECONDS: i64 = 14400; // 4 hours
 
-    let mut blocks_size_to_dump = match self.network {
-        Network::Mainnet => MAINNET_AWAY_FROM_TIP_BULK_SIZE,
-        Network::Testnet => TESTNET_AWAY_FROM_TIP_BULK_SIZE,
-    };
+    let mut blocks_size_to_dump = AWAY_FROM_TIP_BULK_SIZE;
 
     // If we are close to the tip, index one block per bulk call.
if local_time - block_time < CLOSE_TO_TIP_SECONDS { @@ -453,7 +494,7 @@ impl FinalizedState { let rt = tokio::runtime::Runtime::new() .expect("runtime creation for elasticsearch should not fail."); let blocks = self.elastic_blocks.clone(); - let network = self.network; + let network = self.network(); rt.block_on(async move { let response = client diff --git a/zebra-state/src/service/finalized_state/column_family.rs b/zebra-state/src/service/finalized_state/column_family.rs new file mode 100644 index 00000000000..2befbf131e2 --- /dev/null +++ b/zebra-state/src/service/finalized_state/column_family.rs @@ -0,0 +1,327 @@ +//! Type-safe column family access. + +// When these types aren't exported, they become dead code. +#![cfg_attr( + not(any(test, feature = "proptest-impl", feature = "shielded-scan")), + allow(dead_code) +)] + +use std::{ + any::type_name, + collections::{BTreeMap, HashMap}, + fmt::Debug, + hash::Hash, + marker::PhantomData, + ops::RangeBounds, +}; + +use crate::service::finalized_state::{DiskWriteBatch, FromDisk, IntoDisk, ReadDisk, WriteDisk}; + +use super::DiskDb; + +/// A type-safe read-only column family reference. +/// +/// Use this struct instead of raw [`ReadDisk`] access, because it is type-safe. +/// So you only have to define the types once, and you can't accidentally use different types for +/// reading and writing. (Which is a source of subtle database bugs.) +#[derive(Clone)] +pub struct TypedColumnFamily<'cf, Key, Value> +where + Key: IntoDisk + FromDisk + Debug, + Value: IntoDisk + FromDisk, +{ + /// The database. + db: DiskDb, + + /// The column family reference in the database. + cf: rocksdb::ColumnFamilyRef<'cf>, + + /// The column family name, only used for debugging and equality checking. + _cf_name: String, + + /// A marker type used to bind the key and value types to the struct. + _marker: PhantomData<(Key, Value)>, +} + +/// A type-safe and drop-safe batch write to a column family. +/// +/// Use this struct instead of raw [`WriteDisk`] access, because it is type-safe. +/// So you only have to define the types once, and you can't accidentally use different types for +/// reading and writing. (Which is a source of subtle database bugs.) +/// +/// This type is also drop-safe: unwritten batches have to be specifically ignored. 
+#[must_use = "batches must be written to the database"] +#[derive(Debug, Eq, PartialEq)] +pub struct WriteTypedBatch<'cf, Key, Value, Batch> +where + Key: IntoDisk + FromDisk + Debug, + Value: IntoDisk + FromDisk, + Batch: WriteDisk, +{ + inner: TypedColumnFamily<'cf, Key, Value>, + + batch: Batch, +} + +impl<'cf, Key, Value> Debug for TypedColumnFamily<'cf, Key, Value> +where + Key: IntoDisk + FromDisk + Debug, + Value: IntoDisk + FromDisk, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct(&format!( + "TypedColumnFamily<{}, {}>", + type_name::(), + type_name::() + )) + .field("db", &self.db) + .field("cf", &self._cf_name) + .finish() + } +} + +impl<'cf, Key, Value> PartialEq for TypedColumnFamily<'cf, Key, Value> +where + Key: IntoDisk + FromDisk + Debug, + Value: IntoDisk + FromDisk, +{ + fn eq(&self, other: &Self) -> bool { + self.db == other.db && self._cf_name == other._cf_name + } +} + +impl<'cf, Key, Value> Eq for TypedColumnFamily<'cf, Key, Value> +where + Key: IntoDisk + FromDisk + Debug, + Value: IntoDisk + FromDisk, +{ +} + +impl<'cf, Key, Value> TypedColumnFamily<'cf, Key, Value> +where + Key: IntoDisk + FromDisk + Debug, + Value: IntoDisk + FromDisk, +{ + // Creation + + /// Returns a new typed column family, if it exists in the database. + pub fn new(db: &'cf DiskDb, cf_name: &str) -> Option { + let cf = db.cf_handle(cf_name)?; + + Some(Self { + db: db.clone(), + cf, + _cf_name: cf_name.to_string(), + _marker: PhantomData, + }) + } + + // Writing + + /// Returns a typed writer for this column family for a new batch. + /// + /// These methods are the only way to get a `WriteTypedBatch`, which ensures + /// that the read and write types are consistent. + pub fn new_batch_for_writing(self) -> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch> { + WriteTypedBatch { + inner: self, + batch: DiskWriteBatch::new(), + } + } + + /// Wraps an existing write batch, and returns a typed writer for this column family. + /// + /// These methods are the only way to get a `WriteTypedBatch`, which ensures + /// that the read and write types are consistent. + pub fn take_batch_for_writing( + self, + batch: DiskWriteBatch, + ) -> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch> { + WriteTypedBatch { inner: self, batch } + } + + /// Wraps an existing write batch reference, and returns a typed writer for this column family. + /// + /// These methods are the only way to get a `WriteTypedBatch`, which ensures + /// that the read and write types are consistent. + pub fn with_batch_for_writing( + self, + batch: &mut DiskWriteBatch, + ) -> WriteTypedBatch<'cf, Key, Value, &mut DiskWriteBatch> { + WriteTypedBatch { inner: self, batch } + } + + // Reading + + /// Returns true if this rocksdb column family does not contain any entries. + pub fn zs_is_empty(&self) -> bool { + self.db.zs_is_empty(&self.cf) + } + + /// Returns the value for `key` in this rocksdb column family, if present. + pub fn zs_get(&self, key: &Key) -> Option { + self.db.zs_get(&self.cf, key) + } + + /// Check if this rocksdb column family contains the serialized form of `key`. + pub fn zs_contains(&self, key: &Key) -> bool { + self.db.zs_contains(&self.cf, key) + } + + /// Returns the lowest key in this column family, and the corresponding value. + /// + /// Returns `None` if this column family is empty. + pub fn zs_first_key_value(&self) -> Option<(Key, Value)> { + self.db.zs_first_key_value(&self.cf) + } + + /// Returns the highest key in this column family, and the corresponding value. 
+    ///
+    /// Returns `None` if this column family is empty.
+    pub fn zs_last_key_value(&self) -> Option<(Key, Value)> {
+        self.db.zs_last_key_value(&self.cf)
+    }
+
+    /// Returns the first key greater than or equal to `lower_bound` in this column family,
+    /// and the corresponding value.
+    ///
+    /// Returns `None` if there are no keys greater than or equal to `lower_bound`.
+    pub fn zs_next_key_value_from(&self, lower_bound: &Key) -> Option<(Key, Value)> {
+        self.db.zs_next_key_value_from(&self.cf, lower_bound)
+    }
+
+    /// Returns the first key strictly greater than `lower_bound` in this column family,
+    /// and the corresponding value.
+    ///
+    /// Returns `None` if there are no keys greater than `lower_bound`.
+    pub fn zs_next_key_value_strictly_after(&self, lower_bound: &Key) -> Option<(Key, Value)> {
+        self.db
+            .zs_next_key_value_strictly_after(&self.cf, lower_bound)
+    }
+
+    /// Returns the first key less than or equal to `upper_bound` in this column family,
+    /// and the corresponding value.
+    ///
+    /// Returns `None` if there are no keys less than or equal to `upper_bound`.
+    pub fn zs_prev_key_value_back_from(&self, upper_bound: &Key) -> Option<(Key, Value)> {
+        self.db.zs_prev_key_value_back_from(&self.cf, upper_bound)
+    }
+
+    /// Returns the first key strictly less than `upper_bound` in this column family,
+    /// and the corresponding value.
+    ///
+    /// Returns `None` if there are no keys less than `upper_bound`.
+    pub fn zs_prev_key_value_strictly_before(&self, upper_bound: &Key) -> Option<(Key, Value)> {
+        self.db
+            .zs_prev_key_value_strictly_before(&self.cf, upper_bound)
+    }
+
+    /// Returns a forward iterator over the items in this column family in `range`.
+    ///
+    /// Holding this iterator open might delay block commit transactions.
+    pub fn zs_forward_range_iter<Range>(
+        &self,
+        range: Range,
+    ) -> impl Iterator<Item = (Key, Value)> + '_
+    where
+        Range: RangeBounds<Key>,
+    {
+        self.db.zs_forward_range_iter(&self.cf, range)
+    }
+
+    /// Returns a reverse iterator over the items in this column family in `range`.
+    ///
+    /// Holding this iterator open might delay block commit transactions.
+    pub fn zs_reverse_range_iter<Range>(
+        &self,
+        range: Range,
+    ) -> impl Iterator<Item = (Key, Value)> + '_
+    where
+        Range: RangeBounds<Key>,
+    {
+        self.db.zs_reverse_range_iter(&self.cf, range)
+    }
+}
+
+impl<'cf, Key, Value> TypedColumnFamily<'cf, Key, Value>
+where
+    Key: IntoDisk + FromDisk + Debug + Ord,
+    Value: IntoDisk + FromDisk,
+{
+    /// Returns the keys and values in this column family in `range`, in an ordered `BTreeMap`.
+    ///
+    /// Holding this iterator open might delay block commit transactions.
+    pub fn zs_items_in_range_ordered<Range>(&self, range: Range) -> BTreeMap<Key, Value>
+    where
+        Range: RangeBounds<Key>,
+    {
+        self.db.zs_items_in_range_ordered(&self.cf, range)
+    }
+}
+
+impl<'cf, Key, Value> TypedColumnFamily<'cf, Key, Value>
+where
+    Key: IntoDisk + FromDisk + Debug + Hash + Eq,
+    Value: IntoDisk + FromDisk,
+{
+    /// Returns the keys and values in this column family in `range`, in an unordered `HashMap`.
+    ///
+    /// Holding this iterator open might delay block commit transactions.
+    pub fn zs_items_in_range_unordered<Range>(&self, range: Range) -> HashMap<Key, Value>
+    where
+        Range: RangeBounds<Key>,
+    {
+        self.db.zs_items_in_range_unordered(&self.cf, range)
+    }
+}
+
+impl<'cf, Key, Value, Batch> WriteTypedBatch<'cf, Key, Value, Batch>
+where
+    Key: IntoDisk + FromDisk + Debug,
+    Value: IntoDisk + FromDisk,
+    Batch: WriteDisk,
+{
+    // Batching before writing
+
+    /// Serialize and insert the given key and value into this column family,
+    /// overwriting any existing `value` for `key`.
+    pub fn zs_insert(mut self, key: &Key, value: &Value) -> Self {
+        self.batch.zs_insert(&self.inner.cf, key, value);
+
+        self
+    }
+
+    /// Remove the given key from this column family, if it exists.
+    pub fn zs_delete(mut self, key: &Key) -> Self {
+        self.batch.zs_delete(&self.inner.cf, key);
+
+        self
+    }
+
+    /// Delete the given key range from this rocksdb column family, if it exists, including `from`
+    /// and excluding `until_strictly_before`.
+    //.
+    // TODO: convert zs_delete_range() to take std::ops::RangeBounds
+    //       see zs_range_iter() for an example of the edge cases
+    pub fn zs_delete_range(mut self, from: &Key, until_strictly_before: &Key) -> Self {
+        self.batch
+            .zs_delete_range(&self.inner.cf, from, until_strictly_before);
+
+        self
+    }
+}
+
+// Writing a batch to the database requires an owned batch.
+impl<'cf, Key, Value> WriteTypedBatch<'cf, Key, Value, DiskWriteBatch>
+where
+    Key: IntoDisk + FromDisk + Debug,
+    Value: IntoDisk + FromDisk,
+{
+    // Writing batches
+
+    /// Writes this batch to this column family in the database,
+    /// taking ownership and consuming it.
+    pub fn write_batch(self) -> Result<(), rocksdb::Error> {
+        self.inner.db.write(self.batch)
+    }
+}
diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs
index e0abc3ce049..825edbc6657 100644
--- a/zebra-state/src/service/finalized_state/disk_db.rs
+++ b/zebra-state/src/service/finalized_state/disk_db.rs
@@ -7,8 +7,8 @@
 //!
 //! # Correctness
 //!
-//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constants must
-//! be incremented each time the database format (column, serialization, etc) changes.
+//! [`crate::constants::state_database_format_version_in_code()`] must be incremented
+//! each time the database format (column, serialization, etc) changes.
 
 use std::{
     collections::{BTreeMap, HashMap},
@@ -22,13 +22,18 @@ use itertools::Itertools;
 use rlimit::increase_nofile_limit;
 use rocksdb::ReadOptions;
-use zebra_chain::parameters::Network;
+use semver::Version;
+use zebra_chain::{parameters::Network, primitives::byte_array::increment_big_endian};
 
 use crate::{
     service::finalized_state::disk_format::{FromDisk, IntoDisk},
     Config,
 };
 
+// Doc-only imports
+#[allow(unused_imports)]
+use super::{TypedColumnFamily, WriteTypedBatch};
+
 #[cfg(any(test, feature = "proptest-impl"))]
 mod tests;
 
@@ -65,6 +70,12 @@ pub struct DiskDb {
     // This configuration cannot be modified after the database is initialized,
     // because some clones would have different values.
     //
+    /// The configured database kind for this database.
+    db_kind: String,
+
+    /// The format version of the running Zebra code.
+    format_version_in_code: Version,
+
     /// The configured network for this database.
     network: Network,
 
@@ -93,10 +104,6 @@ pub struct DiskDb {
 ///
 /// [`rocksdb::WriteBatch`] is a batched set of database updates,
 /// which must be written to the database using `DiskDb::write(batch)`.
-//
-// TODO: move DiskDb, FinalizedBlock, and the source String into this struct,
-//       (DiskDb can be cloned),
-//       and make them accessible via read-only methods
 #[must_use = "batches must be written to the database"]
 #[derive(Default)]
 pub struct DiskWriteBatch {
@@ -104,10 +111,30 @@ pub struct DiskWriteBatch {
     batch: rocksdb::WriteBatch,
 }
 
-/// Helper trait for inserting (Key, Value) pairs into rocksdb with a consistently
-/// defined format
+impl Debug for DiskWriteBatch {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("DiskWriteBatch")
+            .field("batch", &format!("{} bytes", self.batch.size_in_bytes()))
+            .finish()
+    }
+}
+
+impl PartialEq for DiskWriteBatch {
+    fn eq(&self, other: &Self) -> bool {
+        self.batch.data() == other.batch.data()
+    }
+}
+
+impl Eq for DiskWriteBatch {}
+
+/// Helper trait for inserting serialized typed (Key, Value) pairs into rocksdb.
+///
+/// # Deprecation
+///
+/// This trait should not be used in new code, use [`WriteTypedBatch`] instead.
 //
-// TODO: just implement these methods directly on WriteBatch
+// TODO: replace uses of this trait with WriteTypedBatch,
+//       implement these methods directly on WriteTypedBatch, and delete the trait.
 pub trait WriteDisk {
     /// Serialize and insert the given key and value into a rocksdb column family,
     /// overwriting any existing `value` for `key`.
@@ -117,20 +144,29 @@ pub trait WriteDisk {
         K: IntoDisk + Debug,
         V: IntoDisk;
 
-    /// Remove the given key from rocksdb column family if it exists.
+    /// Remove the given key from a rocksdb column family, if it exists.
     fn zs_delete<C, K>(&mut self, cf: &C, key: K)
     where
         C: rocksdb::AsColumnFamilyRef,
         K: IntoDisk + Debug;
 
-    /// Deletes the given key range from rocksdb column family if it exists, including `from` and
-    /// excluding `to`.
-    fn zs_delete_range<C, K>(&mut self, cf: &C, from: K, to: K)
+    /// Delete the given key range from a rocksdb column family, if it exists, including `from`
+    /// and excluding `until_strictly_before`.
+    //
+    // TODO: convert zs_delete_range() to take std::ops::RangeBounds
+    //       see zs_range_iter() for an example of the edge cases
+    fn zs_delete_range<C, K>(&mut self, cf: &C, from: K, until_strictly_before: K)
     where
        C: rocksdb::AsColumnFamilyRef,
        K: IntoDisk + Debug;
 }
 
+/// # Deprecation
+///
+/// These impls should not be used in new code, use [`WriteTypedBatch`] instead.
+//
+// TODO: replace uses of these impls with WriteTypedBatch,
+//       implement these methods directly on WriteTypedBatch, and delete the trait.
 impl WriteDisk for DiskWriteBatch {
     fn zs_insert<C, K, V>(&mut self, cf: &C, key: K, value: V)
     where
@@ -154,22 +190,57 @@ impl WriteDisk for DiskWriteBatch {
 
     // TODO: convert zs_delete_range() to take std::ops::RangeBounds
     //       see zs_range_iter() for an example of the edge cases
-    fn zs_delete_range<C, K>(&mut self, cf: &C, from: K, to: K)
+    fn zs_delete_range<C, K>(&mut self, cf: &C, from: K, until_strictly_before: K)
     where
         C: rocksdb::AsColumnFamilyRef,
         K: IntoDisk + Debug,
     {
         let from_bytes = from.as_bytes();
-        let to_bytes = to.as_bytes();
-        self.batch.delete_range_cf(cf, from_bytes, to_bytes);
+        let until_strictly_before_bytes = until_strictly_before.as_bytes();
+        self.batch
+            .delete_range_cf(cf, from_bytes, until_strictly_before_bytes);
+    }
+}
+
+// Allow &mut DiskWriteBatch as well as owned DiskWriteBatch
+impl<T> WriteDisk for &mut T
+where
+    T: WriteDisk,
+{
+    fn zs_insert<C, K, V>(&mut self, cf: &C, key: K, value: V)
+    where
+        C: rocksdb::AsColumnFamilyRef,
+        K: IntoDisk + Debug,
+        V: IntoDisk,
+    {
+        (*self).zs_insert(cf, key, value)
+    }
+
+    fn zs_delete<C, K>(&mut self, cf: &C, key: K)
+    where
+        C: rocksdb::AsColumnFamilyRef,
+        K: IntoDisk + Debug,
+    {
+        (*self).zs_delete(cf, key)
+    }
+
+    fn zs_delete_range<C, K>(&mut self, cf: &C, from: K, until_strictly_before: K)
+    where
+        C: rocksdb::AsColumnFamilyRef,
+        K: IntoDisk + Debug,
+    {
+        (*self).zs_delete_range(cf, from, until_strictly_before)
     }
 }
 
-/// Helper trait for retrieving values from rocksdb column familys with a consistently
-/// defined format
+/// Helper trait for retrieving and deserializing values from rocksdb column families.
+///
+/// # Deprecation
+///
+/// This trait should not be used in new code, use [`TypedColumnFamily`] instead.
 //
-// TODO: just implement these methods directly on DiskDb
-//       move this trait, its methods, and support methods to another module
+// TODO: replace uses of this trait with TypedColumnFamily,
+//       implement these methods directly on DiskDb, and delete the trait.
 pub trait ReadDisk {
     /// Returns true if a rocksdb column family `cf` does not contain any entries.
     fn zs_is_empty<C>(&self, cf: &C) -> bool
@@ -217,6 +288,16 @@ pub trait ReadDisk {
         K: IntoDisk + FromDisk,
         V: FromDisk;
 
+    /// Returns the first key strictly greater than `lower_bound` in `cf`,
+    /// and the corresponding value.
+    ///
+    /// Returns `None` if there are no keys greater than `lower_bound`.
+    fn zs_next_key_value_strictly_after<C, K, V>(&self, cf: &C, lower_bound: &K) -> Option<(K, V)>
+    where
+        C: rocksdb::AsColumnFamilyRef,
+        K: IntoDisk + FromDisk,
+        V: FromDisk;
+
     /// Returns the first key less than or equal to `upper_bound` in `cf`,
     /// and the corresponding value.
     ///
@@ -227,6 +308,16 @@ pub trait ReadDisk {
         K: IntoDisk + FromDisk,
         V: FromDisk;
 
+    /// Returns the first key strictly less than `upper_bound` in `cf`,
+    /// and the corresponding value.
+    ///
+    /// Returns `None` if there are no keys less than `upper_bound`.
+    fn zs_prev_key_value_strictly_before<C, K, V>(&self, cf: &C, upper_bound: &K) -> Option<(K, V)>
+    where
+        C: rocksdb::AsColumnFamilyRef,
+        K: IntoDisk + FromDisk,
+        V: FromDisk;
+
     /// Returns the keys and values in `cf` in `range`, in an ordered `BTreeMap`.
     ///
     /// Holding this iterator open might delay block commit transactions.
@@ -269,6 +360,12 @@ impl PartialEq for DiskDb {
 
 impl Eq for DiskDb {}
 
+/// # Deprecation
+///
+/// These impls should not be used in new code, use [`TypedColumnFamily`] instead.
+//
+// TODO: replace uses of these impls with TypedColumnFamily,
+//       implement these methods directly on DiskDb, and delete the trait.
 impl ReadDisk for DiskDb {
     fn zs_is_empty<C>(&self, cf: &C) -> bool
     where
@@ -318,7 +415,6 @@ impl ReadDisk for DiskDb {
             .is_some()
     }
 
-    #[allow(clippy::unwrap_in_result)]
     fn zs_first_key_value<C, K, V>(&self, cf: &C) -> Option<(K, V)>
     where
         C: rocksdb::AsColumnFamilyRef,
@@ -326,10 +422,9 @@ impl ReadDisk for DiskDb {
         V: FromDisk,
     {
         // Reading individual values from iterators does not seem to cause database hangs.
-        self.zs_range_iter(cf, .., false).next()
+        self.zs_forward_range_iter(cf, ..).next()
     }
 
-    #[allow(clippy::unwrap_in_result)]
     fn zs_last_key_value<C, K, V>(&self, cf: &C) -> Option<(K, V)>
     where
         C: rocksdb::AsColumnFamilyRef,
@@ -337,29 +432,47 @@ impl ReadDisk for DiskDb {
         V: FromDisk,
     {
         // Reading individual values from iterators does not seem to cause database hangs.
-        self.zs_range_iter(cf, .., true).next()
+        self.zs_reverse_range_iter(cf, ..).next()
     }
 
-    #[allow(clippy::unwrap_in_result)]
     fn zs_next_key_value_from<C, K, V>(&self, cf: &C, lower_bound: &K) -> Option<(K, V)>
     where
         C: rocksdb::AsColumnFamilyRef,
         K: IntoDisk + FromDisk,
         V: FromDisk,
     {
-        // Reading individual values from iterators does not seem to cause database hangs.
-        self.zs_range_iter(cf, lower_bound.., false).next()
+        self.zs_forward_range_iter(cf, lower_bound..).next()
+    }
+
+    fn zs_next_key_value_strictly_after<C, K, V>(&self, cf: &C, lower_bound: &K) -> Option<(K, V)>
+    where
+        C: rocksdb::AsColumnFamilyRef,
+        K: IntoDisk + FromDisk,
+        V: FromDisk,
+    {
+        use std::ops::Bound::*;
+
+        // There is no standard syntax for an excluded start bound.
+        self.zs_forward_range_iter(cf, (Excluded(lower_bound), Unbounded))
+            .next()
     }
 
-    #[allow(clippy::unwrap_in_result)]
     fn zs_prev_key_value_back_from<C, K, V>(&self, cf: &C, upper_bound: &K) -> Option<(K, V)>
    where
         C: rocksdb::AsColumnFamilyRef,
         K: IntoDisk + FromDisk,
         V: FromDisk,
     {
-        // Reading individual values from iterators does not seem to cause database hangs.
-        self.zs_range_iter(cf, ..=upper_bound, true).next()
+        self.zs_reverse_range_iter(cf, ..=upper_bound).next()
+    }
+
+    fn zs_prev_key_value_strictly_before<C, K, V>(&self, cf: &C, upper_bound: &K) -> Option<(K, V)>
+    where
+        C: rocksdb::AsColumnFamilyRef,
+        K: IntoDisk + FromDisk,
+        V: FromDisk,
+    {
+        self.zs_reverse_range_iter(cf, ..upper_bound).next()
     }
 
     fn zs_items_in_range_ordered<C, K, V, R>(&self, cf: &C, range: R) -> BTreeMap<K, V>
     where
@@ -369,7 +482,7 @@ impl ReadDisk for DiskDb {
         V: FromDisk,
         R: RangeBounds<K>,
     {
-        self.zs_range_iter(cf, range, false).collect()
+        self.zs_forward_range_iter(cf, range).collect()
     }
 
     fn zs_items_in_range_unordered<C, K, V, R>(&self, cf: &C, range: R) -> HashMap<K, V>
     where
@@ -379,7 +492,7 @@ impl ReadDisk for DiskDb {
         V: FromDisk,
         R: RangeBounds<K>,
     {
-        self.zs_range_iter(cf, range, false).collect()
+        self.zs_forward_range_iter(cf, range).collect()
     }
 }
 
@@ -399,18 +512,13 @@ impl DiskWriteBatch {
 }
 
 impl DiskDb {
-    /// Returns an iterator over the items in `cf` in `range`.
-    ///
-    /// Accepts a `reverse` argument. If it is `true`, creates the iterator with an
-    /// [`IteratorMode`](rocksdb::IteratorMode) of [`End`](rocksdb::IteratorMode::End), or
-    /// [`From`](rocksdb::IteratorMode::From) with [`Direction::Reverse`](rocksdb::Direction::Reverse).
+    /// Returns a forward iterator over the items in `cf` in `range`.
     ///
     /// Holding this iterator open might delay block commit transactions.
-    pub fn zs_range_iter<C, K, V, R>(
+    pub fn zs_forward_range_iter<C, K, V, R>(
         &self,
         cf: &C,
         range: R,
-        reverse: bool,
     ) -> impl Iterator<Item = (K, V)> + '_
     where
         C: rocksdb::AsColumnFamilyRef,
@@ -418,14 +526,12 @@ impl DiskDb {
         V: FromDisk,
         R: RangeBounds<K>,
     {
-        self.zs_range_iter_with_direction(cf, range, reverse)
+        self.zs_range_iter_with_direction(cf, range, false)
     }
 
     /// Returns a reverse iterator over the items in `cf` in `range`.
     ///
     /// Holding this iterator open might delay block commit transactions.
-    ///
-    /// This code is copied from `zs_range_iter()`, but with the mode reversed.
     pub fn zs_reverse_range_iter<C, K, V, R>(
         &self,
         cf: &C,
@@ -534,11 +640,8 @@ impl DiskDb {
             Included(mut bound) => {
                 // Increment the last byte in the upper bound that is less than u8::MAX, and
                 // clear any bytes after it to increment the next key in lexicographic order
-                // (next big-endian number) this Vec represents to RocksDB.
-                let is_wrapped_overflow = bound.iter_mut().rev().all(|v| {
-                    *v = v.wrapping_add(1);
-                    v == &0
-                });
+                // (next big-endian number). RocksDB uses lexicographic order for keys.
+                let is_wrapped_overflow = increment_big_endian(&mut bound);
 
                 if is_wrapped_overflow {
                     bound.insert(0, 0x01)
@@ -617,44 +720,19 @@ impl DiskDb {
     ///
     const MEMTABLE_RAM_CACHE_MEGABYTES: usize = 128;
 
-    /// The column families supported by the running database code.
-    const COLUMN_FAMILIES_IN_CODE: &'static [&'static str] = &[
-        // Blocks
-        "hash_by_height",
-        "height_by_hash",
-        "block_header_by_height",
-        // Transactions
-        "tx_by_loc",
-        "hash_by_tx_loc",
-        "tx_loc_by_hash",
-        // Transparent
-        "balance_by_transparent_addr",
-        "tx_loc_by_transparent_addr_loc",
-        "utxo_by_out_loc",
-        "utxo_loc_by_transparent_addr_loc",
-        // Sprout
-        "sprout_nullifiers",
-        "sprout_anchors",
-        "sprout_note_commitment_tree",
-        // Sapling
-        "sapling_nullifiers",
-        "sapling_anchors",
-        "sapling_note_commitment_tree",
-        "sapling_note_commitment_subtree",
-        // Orchard
-        "orchard_nullifiers",
-        "orchard_anchors",
-        "orchard_note_commitment_tree",
-        "orchard_note_commitment_subtree",
-        // Chain
-        "history_tree",
-        "tip_chain_value_pool",
-    ];
-
-    /// Opens or creates the database at `config.path` for `network`,
+    /// Opens or creates the database at a path based on the kind, major version and network,
+    /// with the supplied column families, preserving any existing column families,
     /// and returns a shared low-level database wrapper.
-    pub fn new(config: &Config, network: Network) -> DiskDb {
-        let path = config.db_path(network);
+    pub fn new(
+        config: &Config,
+        db_kind: impl AsRef<str>,
+        format_version_in_code: &Version,
+        network: Network,
+        column_families_in_code: impl IntoIterator<Item = String>,
+        read_only: bool,
+    ) -> DiskDb {
+        let db_kind = db_kind.as_ref();
+        let path = config.db_path(db_kind, format_version_in_code.major, network);
 
         let db_options = DiskDb::options();
 
@@ -666,9 +744,7 @@ impl DiskDb {
         //
        //
         let column_families_on_disk = DB::list_cf(&db_options, &path).unwrap_or_default();
-        let column_families_in_code = Self::COLUMN_FAMILIES_IN_CODE
-            .iter()
-            .map(ToString::to_string);
+        let column_families_in_code = column_families_in_code.into_iter();
 
         let column_families = column_families_on_disk
             .into_iter()
            .chain(column_families_in_code)
             .unique()
             .map(|cf_name| rocksdb::ColumnFamilyDescriptor::new(cf_name, db_options.clone()));
 
-        let db_result = DB::open_cf_descriptors(&db_options, &path, column_families);
+        let db_result = if read_only {
+            DB::open_cf_descriptors_read_only(&db_options, &path, column_families, false)
+        } else {
+            DB::open_cf_descriptors(&db_options, &path, column_families)
+        };
 
         match db_result {
             Ok(db) => {
                 info!("Opened Zebra state cache at {}", path.display());
 
                 let db = DiskDb {
+                    db_kind: db_kind.to_string(),
+                    format_version_in_code: format_version_in_code.clone(),
                     network,
                     ephemeral: config.ephemeral,
                     db: Arc::new(db),
@@ -704,6 +786,21 @@ impl DiskDb {
 
     // Accessor methods
 
+    /// Returns the configured database kind for this database.
+    pub fn db_kind(&self) -> String {
+        self.db_kind.clone()
+    }
+
+    /// Returns the format version of the running code that created this `DiskDb` instance in memory.
+    pub fn format_version_in_code(&self) -> Version {
+        self.format_version_in_code.clone()
+    }
+
+    /// Returns the fixed major version for this database.
+    pub fn major_version(&self) -> u64 {
+        self.format_version_in_code().major
+    }
+
     /// Returns the configured network for this database.
     pub fn network(&self) -> Network {
         self.network
@@ -714,8 +811,18 @@ impl DiskDb {
         self.db.path()
     }
 
+    /// Returns the low-level rocksdb inner database.
+    #[allow(dead_code)]
+    fn inner(&self) -> &Arc<DB> {
+        &self.db
+    }
+
     /// Returns the column family handle for `cf_name`.
-    pub fn cf_handle(&self, cf_name: &str) -> Option<rocksdb::ColumnFamilyRef> {
+    pub fn cf_handle(&self, cf_name: &str) -> Option<rocksdb::ColumnFamilyRef<'_>> {
+        // Note: the lifetime returned by this method is subtly wrong. As of December 2023 it is
+        // the shorter of &self and &str, but RocksDB clones column family names internally, so it
+        // should just be &self. To avoid this restriction, clone the string before passing it to
+        // this method. Currently Zebra uses static strings, so this doesn't matter.
         self.db.cf_handle(cf_name)
     }
diff --git a/zebra-state/src/service/finalized_state/disk_format.rs b/zebra-state/src/service/finalized_state/disk_format.rs
index d780843d2b1..0ce04431e54 100644
--- a/zebra-state/src/service/finalized_state/disk_format.rs
+++ b/zebra-state/src/service/finalized_state/disk_format.rs
@@ -2,8 +2,8 @@
 //!
 //! # Correctness
 //!
-//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must
-//! be incremented each time the database format (column, serialization, etc) changes.
+//! [`crate::constants::state_database_format_version_in_code()`] must be incremented
+//! each time the database format (column, serialization, etc) changes.
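The `TypedColumnFamily` and `WriteTypedBatch` types introduced above pin the key and value types to the column family handle once, so reads and writes can never disagree about the serialization format. A minimal standalone sketch of the same pattern, using a `BTreeMap` byte store and toy serialization traits in place of RocksDB and Zebra's `IntoDisk`/`FromDisk` (every name here is illustrative, not Zebra's API):

```rust
use std::{collections::BTreeMap, marker::PhantomData};

// Toy stand-ins for the real serialization traits.
trait ToBytes {
    fn to_bytes(&self) -> Vec<u8>;
}
trait FromBytes {
    fn from_bytes(bytes: &[u8]) -> Self;
}

impl ToBytes for u32 {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_be_bytes().to_vec()
    }
}
impl FromBytes for u32 {
    fn from_bytes(bytes: &[u8]) -> Self {
        u32::from_be_bytes(bytes.try_into().expect("stored keys are 4 bytes"))
    }
}

/// An untyped byte store, playing the role of a raw RocksDB column family.
#[derive(Default)]
struct RawCf {
    map: BTreeMap<Vec<u8>, Vec<u8>>,
}

/// A typed wrapper: `PhantomData` binds the key and value types to the
/// handle, so every read and write of this column family uses one format.
struct TypedCf<'cf, K, V> {
    raw: &'cf mut RawCf,
    _marker: PhantomData<(K, V)>,
}

impl<'cf, K: ToBytes + FromBytes, V: ToBytes + FromBytes> TypedCf<'cf, K, V> {
    fn new(raw: &'cf mut RawCf) -> Self {
        Self { raw, _marker: PhantomData }
    }

    /// Builder-style write: returning `Self` lets `#[must_use]` flag
    /// batches that are built but never committed, like the real code.
    #[must_use = "writes must be committed"]
    fn insert(self, key: &K, value: &V) -> Self {
        self.raw.map.insert(key.to_bytes(), value.to_bytes());
        self
    }

    fn get(&self, key: &K) -> Option<V> {
        self.raw.map.get(&key.to_bytes()).map(|b| V::from_bytes(b))
    }
}

fn main() {
    let mut cf = RawCf::default();
    let typed = TypedCf::<u32, u32>::new(&mut cf).insert(&1, &100);
    assert_eq!(typed.get(&1), Some(100));
}
```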
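The range-iterator hunk above also replaces an inline closure with `increment_big_endian`, which turns an inclusive upper bound into an exclusive one. A sketch of the carry logic under that reading (the semantics match the closure that was removed, but this body is a reconstruction, not the `zebra_chain` source):

```rust
/// Treat `bytes` as one big-endian integer and add 1 in place.
/// Returns true if the whole array wrapped around to zero (overflow).
fn increment_big_endian(bytes: &mut [u8]) -> bool {
    // Work from the least significant byte, and stop at the first byte
    // that doesn't wrap, because there is no carry past that point.
    for byte in bytes.iter_mut().rev() {
        *byte = byte.wrapping_add(1);
        if *byte != 0 {
            return false;
        }
    }
    true
}

fn main() {
    // Among fixed-width keys, [0x02, 0x00] is the immediate successor of
    // [0x01, 0xFF] in lexicographic (big-endian) order, so incrementing
    // turns an inclusive upper bound into an exclusive one.
    let mut bound = vec![0x01, 0xFF];
    assert!(!increment_big_endian(&mut bound));
    assert_eq!(bound, [0x02, 0x00]);

    // An all-0xFF bound wraps to zero and reports overflow; the caller
    // above handles that case separately (by prepending a byte).
    let mut bound = vec![0xFF, 0xFF];
    assert!(increment_big_endian(&mut bound));
    assert_eq!(bound, [0x00, 0x00]);
}
```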
use std::sync::Arc; @@ -13,12 +13,24 @@ pub mod shielded; pub mod transparent; pub mod upgrade; -#[cfg(test)] +#[cfg(feature = "shielded-scan")] +pub mod scan; + +#[cfg(any(test, feature = "proptest-impl"))] mod tests; pub use block::{TransactionIndex, TransactionLocation, MAX_ON_DISK_HEIGHT}; pub use transparent::{OutputIndex, OutputLocation}; +#[cfg(feature = "shielded-scan")] +pub use scan::{ + SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult, + SaplingScanningKey, +}; + +#[cfg(any(test, feature = "proptest-impl"))] +pub use tests::KV; + /// Helper type for writing types to disk as raw bytes. /// Also used to convert key types to raw bytes for disk lookups. pub trait IntoDisk { diff --git a/zebra-state/src/service/finalized_state/disk_format/block.rs b/zebra-state/src/service/finalized_state/disk_format/block.rs index 03a7f648053..22495ebf332 100644 --- a/zebra-state/src/service/finalized_state/disk_format/block.rs +++ b/zebra-state/src/service/finalized_state/disk_format/block.rs @@ -2,8 +2,8 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must -//! be incremented each time the database format (column, serialization, etc) changes. +//! [`crate::constants::state_database_format_version_in_code()`] must be incremented +//! each time the database format (column, serialization, etc) changes. use zebra_chain::{ block::{self, Height}, @@ -65,7 +65,7 @@ pub const TRANSACTION_LOCATION_DISK_BYTES: usize = HEIGHT_DISK_BYTES + TX_INDEX_ #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] #[cfg_attr( any(test, feature = "proptest-impl"), - derive(Arbitrary, Serialize, Deserialize) + derive(Arbitrary, Default, Serialize, Deserialize) )] pub struct TransactionIndex(pub(super) u16); @@ -89,11 +89,9 @@ impl TransactionIndex { ) } - /// Returns this index as a `usize` + /// Returns this index as a `usize`. pub fn as_usize(&self) -> usize { - self.0 - .try_into() - .expect("the maximum valid index fits in usize") + self.0.into() } /// Creates a transaction index from a `u64`. @@ -105,13 +103,21 @@ impl TransactionIndex { ) } - /// Returns this index as a `u64` + /// Returns this index as a `u64`. #[allow(dead_code)] pub fn as_u64(&self) -> u64 { - self.0 - .try_into() - .expect("the maximum valid index fits in u64") + self.0.into() } + + /// The minimum value of a transaction index. + /// + /// This value corresponds to the coinbase transaction. + pub const MIN: Self = Self(u16::MIN); + + /// The maximum value of a transaction index. + /// + /// This value corresponds to the highest possible transaction index. + pub const MAX: Self = Self(u16::MAX); } /// A transaction's location in the chain, by block height and transaction index. @@ -120,7 +126,7 @@ impl TransactionIndex { #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] #[cfg_attr( any(test, feature = "proptest-impl"), - derive(Arbitrary, Serialize, Deserialize) + derive(Arbitrary, Default, Serialize, Deserialize) )] pub struct TransactionLocation { /// The block height of the transaction. @@ -131,6 +137,11 @@ pub struct TransactionLocation { } impl TransactionLocation { + /// Creates a transaction location from a block height and transaction index. + pub fn from_parts(height: Height, index: TransactionIndex) -> TransactionLocation { + TransactionLocation { height, index } + } + /// Creates a transaction location from a block height and transaction index. 
pub fn from_index(height: Height, transaction_index: u16) -> TransactionLocation { TransactionLocation { @@ -154,6 +165,42 @@ impl TransactionLocation { index: TransactionIndex::from_u64(transaction_index), } } + + /// The minimum value of a transaction location. + /// + /// This value corresponds to the genesis coinbase transaction. + pub const MIN: Self = Self { + height: Height::MIN, + index: TransactionIndex::MIN, + }; + + /// The maximum value of a transaction location. + /// + /// This value corresponds to the last transaction in the highest possible block. + pub const MAX: Self = Self { + height: Height::MAX, + index: TransactionIndex::MAX, + }; + + /// The minimum value of a transaction location for `height`. + /// + /// This value is the coinbase transaction. + pub const fn min_for_height(height: Height) -> Self { + Self { + height, + index: TransactionIndex::MIN, + } + } + + /// The maximum value of a transaction location for `height`. + /// + /// This value can be a valid entry, but it won't fit in a 2MB block. + pub const fn max_for_height(height: Height) -> Self { + Self { + height, + index: TransactionIndex::MAX, + } + } } // Block and transaction trait impls diff --git a/zebra-state/src/service/finalized_state/disk_format/chain.rs b/zebra-state/src/service/finalized_state/disk_format/chain.rs index 512424ec16f..5b45ae2c73a 100644 --- a/zebra-state/src/service/finalized_state/disk_format/chain.rs +++ b/zebra-state/src/service/finalized_state/disk_format/chain.rs @@ -2,20 +2,16 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must -//! be incremented each time the database format (column, serialization, etc) changes. +//! [`crate::constants::state_database_format_version_in_code()`] must be incremented +//! each time the database format (column, serialization, etc) changes. use std::collections::BTreeMap; use bincode::Options; use zebra_chain::{ - amount::NonNegative, - block::Height, - history_tree::{HistoryTree, NonEmptyHistoryTree}, - parameters::Network, - primitives::zcash_history, - value_balance::ValueBalance, + amount::NonNegative, block::Height, history_tree::NonEmptyHistoryTree, parameters::Network, + primitives::zcash_history, value_balance::ValueBalance, }; use crate::service::finalized_state::disk_format::{FromDisk, IntoDisk}; @@ -82,10 +78,3 @@ impl FromDisk for NonEmptyHistoryTree { .expect("deserialization format should match the serialization format used by IntoDisk") } } - -// We don't write empty history trees to disk, so we know this one is non-empty. -impl FromDisk for HistoryTree { - fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { - NonEmptyHistoryTree::from_bytes(bytes).into() - } -} diff --git a/zebra-state/src/service/finalized_state/disk_format/scan.rs b/zebra-state/src/service/finalized_state/disk_format/scan.rs new file mode 100644 index 00000000000..db5ab78efba --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/scan.rs @@ -0,0 +1,261 @@ +//! Serialization formats for the shielded scanner results database. +//! +//! Due to Rust's orphan rule, these serializations must be implemented in this crate. +//! +//! # Correctness +//! +//! `zebra_scan::Storage::database_format_version_in_code()` must be incremented +//! each time the database format (column, serialization, etc) changes. 
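The `TransactionLocation::MIN`, `MAX`, and `*_for_height` constants above only work as range-scan endpoints because the on-disk key encoding sorts the same way as `(height, index)` pairs. A small sketch of that property, assuming the 3-byte big-endian height and 2-byte index suggested by the surrounding constants (the two constants below are stand-ins, not imports from Zebra):

```rust
const HEIGHT_DISK_BYTES: usize = 3;
const TX_INDEX_DISK_BYTES: usize = 2;

/// Encode a (height, index) pair as fixed-width big-endian bytes.
fn encode(height: u32, index: u16) -> Vec<u8> {
    assert!(height <= 0x00FF_FFFF, "height must fit in 3 bytes");

    let mut bytes = Vec::with_capacity(HEIGHT_DISK_BYTES + TX_INDEX_DISK_BYTES);
    // Keep the low 3 bytes of the height, most significant first.
    bytes.extend_from_slice(&height.to_be_bytes()[1..]);
    bytes.extend_from_slice(&index.to_be_bytes());
    bytes
}

fn main() {
    // Lexicographic byte order matches (height, index) order, so RocksDB
    // iterates transactions in chain order, and the MIN/MAX index values
    // form valid endpoints for scanning all transactions in one block.
    assert!(encode(100, 1) < encode(100, 2));
    assert!(encode(100, u16::MAX) < encode(101, 0));
}
```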
+
+use std::fmt;
+
+use hex::{FromHex, ToHex};
+use zebra_chain::{block::Height, transaction};
+
+use crate::{FromDisk, IntoDisk, TransactionLocation};
+
+use super::block::TRANSACTION_LOCATION_DISK_BYTES;
+
+#[cfg(any(test, feature = "proptest-impl"))]
+use proptest_derive::Arbitrary;
+
+#[cfg(test)]
+mod tests;
+
+/// The type used in Zebra to store Sapling scanning keys.
+/// It can represent a full viewing key or an individual viewing key.
+pub type SaplingScanningKey = String;
+
+/// Stores a scanning result.
+///
+/// Currently contains a TXID in "display order", which is big-endian byte order following the u256
+/// convention set by Bitcoin and zcashd.
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[cfg_attr(
+    any(test, feature = "proptest-impl"),
+    derive(Arbitrary, Default, serde::Serialize, serde::Deserialize)
+)]
+pub struct SaplingScannedResult(
+    #[cfg_attr(any(test, feature = "proptest-impl"), serde(with = "hex"))] [u8; 32],
+);
+
+impl fmt::Display for SaplingScannedResult {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str(&self.encode_hex::<String>())
+    }
+}
+
+impl fmt::Debug for SaplingScannedResult {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_tuple("SaplingScannedResult")
+            .field(&self.encode_hex::<String>())
+            .finish()
+    }
+}
+
+impl ToHex for &SaplingScannedResult {
+    fn encode_hex<T: FromIterator<char>>(&self) -> T {
+        self.bytes_in_display_order().encode_hex()
+    }
+
+    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
+        self.bytes_in_display_order().encode_hex_upper()
+    }
+}
+
+impl ToHex for SaplingScannedResult {
+    fn encode_hex<T: FromIterator<char>>(&self) -> T {
+        (&self).encode_hex()
+    }
+
+    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
+        (&self).encode_hex_upper()
+    }
+}
+
+impl FromHex for SaplingScannedResult {
+    type Error = <[u8; 32] as FromHex>::Error;
+
+    fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
+        let result = <[u8; 32]>::from_hex(hex)?;
+
+        Ok(Self::from_bytes_in_display_order(result))
+    }
+}
+
+impl From<SaplingScannedResult> for transaction::Hash {
+    fn from(scanned_result: SaplingScannedResult) -> Self {
+        transaction::Hash::from_bytes_in_display_order(&scanned_result.0)
+    }
+}
+
+impl From<transaction::Hash> for SaplingScannedResult {
+    fn from(hash: transaction::Hash) -> Self {
+        SaplingScannedResult(hash.bytes_in_display_order())
+    }
+}
+
+impl SaplingScannedResult {
+    /// Creates a `SaplingScannedResult` from bytes in display order.
+    pub fn from_bytes_in_display_order(bytes: [u8; 32]) -> Self {
+        Self(bytes)
+    }
+
+    /// Returns the inner bytes in display order.
+    pub fn bytes_in_display_order(&self) -> [u8; 32] {
+        self.0
+    }
+}
+
+/// A database column family entry for a block scanned with a Sapling viewing key.
+#[derive(Clone, Debug, Eq, PartialEq)]
+#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Default))]
+pub struct SaplingScannedDatabaseEntry {
+    /// The database column family key. Must be unique for each scanning key and scanned block.
+    pub index: SaplingScannedDatabaseIndex,
+
+    /// The database column family value.
+    pub value: Option<SaplingScannedResult>,
+}
+
+/// A database column family key for a block scanned with a Sapling viewing key.
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
+#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Default))]
+pub struct SaplingScannedDatabaseIndex {
+    /// The Sapling viewing key used to scan the block.
+    pub sapling_key: SaplingScanningKey,
+
+    /// The transaction location: block height and transaction index.
+    pub tx_loc: TransactionLocation,
+}
+
+impl SaplingScannedDatabaseIndex {
+    /// The minimum value of a sapling scanned database index.
+    ///
+    /// This value is guaranteed to be the minimum, and not correspond to a valid key.
+    //
+    // Note: to calculate the maximum value, we need a key length.
+    pub const fn min() -> Self {
+        Self {
+            // The empty string is the minimum value in RocksDB lexicographic order.
+            sapling_key: String::new(),
+            tx_loc: TransactionLocation::MIN,
+        }
+    }
+
+    /// The minimum value of a sapling scanned database index for `sapling_key`.
+    ///
+    /// This value does not correspond to a valid entry.
+    /// (The genesis coinbase transaction does not have shielded transfers.)
+    pub fn min_for_key(sapling_key: &SaplingScanningKey) -> Self {
+        Self {
+            sapling_key: sapling_key.clone(),
+            tx_loc: TransactionLocation::MIN,
+        }
+    }
+
+    /// The maximum value of a sapling scanned database index for `sapling_key`.
+    ///
+    /// This value may correspond to a valid entry, but it won't be mined for many decades.
+    pub fn max_for_key(sapling_key: &SaplingScanningKey) -> Self {
+        Self {
+            sapling_key: sapling_key.clone(),
+            tx_loc: TransactionLocation::MAX,
+        }
+    }
+
+    /// The minimum value of a sapling scanned database index for `sapling_key` and `height`.
+    ///
+    /// This value can be a valid entry for shielded coinbase.
+    pub fn min_for_key_and_height(sapling_key: &SaplingScanningKey, height: Height) -> Self {
+        Self {
+            sapling_key: sapling_key.clone(),
+            tx_loc: TransactionLocation::min_for_height(height),
+        }
+    }
+
+    /// The maximum value of a sapling scanned database index for `sapling_key` and `height`.
+    ///
+    /// This value can be a valid entry, but it won't fit in a 2MB block.
+    pub fn max_for_key_and_height(sapling_key: &SaplingScanningKey, height: Height) -> Self {
+        Self {
+            sapling_key: sapling_key.clone(),
+            tx_loc: TransactionLocation::max_for_height(height),
+        }
+    }
+}
+
+impl IntoDisk for SaplingScanningKey {
+    type Bytes = Vec<u8>;
+
+    fn as_bytes(&self) -> Self::Bytes {
+        SaplingScanningKey::as_bytes(self).to_vec()
+    }
+}
+
+impl FromDisk for SaplingScanningKey {
+    fn from_bytes(bytes: impl AsRef<[u8]>) -> Self {
+        SaplingScanningKey::from_utf8(bytes.as_ref().to_vec())
+            .expect("only valid UTF-8 strings are written to the database")
+    }
+}
+
+impl IntoDisk for SaplingScannedDatabaseIndex {
+    type Bytes = Vec<u8>;
+
+    fn as_bytes(&self) -> Self::Bytes {
+        let mut bytes = Vec::new();
+
+        bytes.extend(self.sapling_key.as_bytes());
+        bytes.extend(self.tx_loc.as_bytes());
+
+        bytes
+    }
+}
+
+impl FromDisk for SaplingScannedDatabaseIndex {
+    fn from_bytes(bytes: impl AsRef<[u8]>) -> Self {
+        let bytes = bytes.as_ref();
+
+        let (sapling_key, tx_loc) = bytes.split_at(bytes.len() - TRANSACTION_LOCATION_DISK_BYTES);
+
+        Self {
+            sapling_key: SaplingScanningKey::from_bytes(sapling_key),
+            tx_loc: TransactionLocation::from_bytes(tx_loc),
+        }
+    }
+}
+
+// We can't implement IntoDisk or FromDisk for SaplingScannedResult,
+// because the format is actually Option<SaplingScannedResult>.
+
+impl IntoDisk for Option<SaplingScannedResult> {
+    type Bytes = Vec<u8>;
+
+    fn as_bytes(&self) -> Self::Bytes {
+        let mut bytes = Vec::new();
+
+        if let Some(result) = self.as_ref() {
+            bytes.extend(result.bytes_in_display_order());
+        }
+
+        bytes
+    }
+}
+
+impl FromDisk for Option<SaplingScannedResult> {
+    #[allow(clippy::unwrap_in_result)]
+    fn from_bytes(bytes: impl AsRef<[u8]>) -> Self {
+        let bytes = bytes.as_ref();
+
+        if bytes.is_empty() {
+            None
+        } else {
+            Some(SaplingScannedResult::from_bytes_in_display_order(
+                bytes
+                    .try_into()
+                    .expect("unexpected incorrect SaplingScannedResult data length"),
+            ))
+        }
+    }
+}
diff --git a/zebra-state/src/service/finalized_state/disk_format/scan/tests.rs b/zebra-state/src/service/finalized_state/disk_format/scan/tests.rs
new file mode 100644
index 00000000000..0aa4c938982
--- /dev/null
+++ b/zebra-state/src/service/finalized_state/disk_format/scan/tests.rs
@@ -0,0 +1,3 @@
+//! Tests for scanner database serialization.
+
+mod prop;
diff --git a/zebra-state/src/service/finalized_state/disk_format/scan/tests/prop.rs b/zebra-state/src/service/finalized_state/disk_format/scan/tests/prop.rs
new file mode 100644
index 00000000000..c770daecbd1
--- /dev/null
+++ b/zebra-state/src/service/finalized_state/disk_format/scan/tests/prop.rs
@@ -0,0 +1,37 @@
+//! Randomised proptests for scanner database formats.
+
+use proptest::{arbitrary::any, prelude::*};
+
+use crate::{
+    service::finalized_state::arbitrary::assert_value_properties, SaplingScannedDatabaseIndex,
+    SaplingScannedResult, SaplingScanningKey, MAX_ON_DISK_HEIGHT,
+};
+
+#[test]
+fn roundtrip_sapling_scanning_key() {
+    let _init_guard = zebra_test::init();
+
+    proptest!(|(val in any::<SaplingScanningKey>())| assert_value_properties(val));
+}
+
+#[test]
+fn roundtrip_sapling_db_index() {
+    let _init_guard = zebra_test::init();
+
+    proptest!(
+        |(mut val in any::<SaplingScannedDatabaseIndex>())| {
+            // Limit the random height to the valid on-disk range.
+            // Blocks outside this range are rejected before they reach the state.
+            // (It would take decades to generate a valid chain this high.)
+            val.tx_loc.height.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
+            assert_value_properties(val)
+        }
+    );
+}
+
+#[test]
+fn roundtrip_option_sapling_result() {
+    let _init_guard = zebra_test::init();
+
+    proptest!(|(val in any::<Option<SaplingScannedResult>>())| assert_value_properties(val));
+}
diff --git a/zebra-state/src/service/finalized_state/disk_format/shielded.rs b/zebra-state/src/service/finalized_state/disk_format/shielded.rs
index 622b1ae1a7c..bcd24d5c604 100644
--- a/zebra-state/src/service/finalized_state/disk_format/shielded.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/shielded.rs
@@ -2,8 +2,8 @@
 //!
 //! # Correctness
 //!
-//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must
-//! be incremented each time the database format (column, serialization, etc) changes.
+//! [`crate::constants::state_database_format_version_in_code()`] must be incremented
+//! each time the database format (column, serialization, etc) changes.
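The scanner index serialization above can concatenate a variable-length UTF-8 key with a transaction location and still round-trip, because the location suffix has a fixed width (`TRANSACTION_LOCATION_DISK_BYTES`), so no length prefix or delimiter is needed; and `None` results are stored as the empty byte string, which is why the impls target `Option<SaplingScannedResult>` rather than the bare type. A self-contained sketch of the suffix-splitting technique (the 5-byte width and all names are illustrative assumptions):

```rust
/// Fixed on-disk width of the trailing transaction location.
const TX_LOC_BYTES: usize = 5;

/// Encode a variable-length string key followed by a fixed-width suffix.
fn encode_index(sapling_key: &str, tx_loc: [u8; TX_LOC_BYTES]) -> Vec<u8> {
    let mut bytes = sapling_key.as_bytes().to_vec();
    bytes.extend(tx_loc);
    bytes
}

/// Decode by splitting the fixed-width suffix off the end: everything
/// before the last `TX_LOC_BYTES` bytes is the string key.
fn decode_index(bytes: &[u8]) -> (String, [u8; TX_LOC_BYTES]) {
    let (key, tx_loc) = bytes.split_at(bytes.len() - TX_LOC_BYTES);
    (
        String::from_utf8(key.to_vec()).expect("keys are valid UTF-8"),
        tx_loc.try_into().expect("suffix width is constant"),
    )
}

fn main() {
    let bytes = encode_index("sapling-viewing-key", [0, 1, 2, 3, 4]);
    let (key, tx_loc) = decode_index(&bytes);
    assert_eq!(key, "sapling-viewing-key");
    assert_eq!(tx_loc, [0, 1, 2, 3, 4]);
}
```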
 use bincode::Options;
 
@@ -178,11 +178,11 @@ impl IntoDisk for orchard::tree::Node {
     }
 }
 
-impl<Node: AsRef<[u8]>> IntoDisk for NoteCommitmentSubtreeData<Node> {
+impl<SubtreeRoot: AsRef<[u8]>> IntoDisk for NoteCommitmentSubtreeData<SubtreeRoot> {
     type Bytes = Vec<u8>;
 
     fn as_bytes(&self) -> Self::Bytes {
-        [self.end.as_bytes().to_vec(), self.node.as_bytes()].concat()
+        [self.end_height.as_bytes().to_vec(), self.root.as_bytes()].concat()
     }
 }
diff --git a/zebra-state/src/service/finalized_state/disk_format/tests.rs b/zebra-state/src/service/finalized_state/disk_format/tests.rs
index 4680764c453..f959800ba8d 100644
--- a/zebra-state/src/service/finalized_state/disk_format/tests.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/tests.rs
@@ -2,12 +2,14 @@
 
 use serde::{Deserialize, Serialize};
 
+#[cfg(test)]
 mod prop;
+#[cfg(test)]
 mod snapshot;
 
 /// A formatting struct for raw key-value data
 #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
-struct KV {
+pub struct KV {
     /// The raw key bytes, as a hexadecimal-encoded string.
     k: String,
 
@@ -17,7 +19,7 @@ impl KV {
     /// Create a new `KV` from raw key-value data.
-    fn new<K, V>(key: K, value: V) -> KV
+    pub fn new<K, V>(key: K, value: V) -> KV
     where
         K: AsRef<[u8]>,
         V: AsRef<[u8]>,
@@ -26,7 +28,7 @@ impl KV {
     }
 
     /// Create a new `KV` from hex-encoded key-value data.
-    fn new_hex(key: String, value: String) -> KV {
+    pub fn new_hex(key: String, value: String) -> KV {
         KV { k: key, v: value }
     }
 }
diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs
index cd3c6cf26d8..5067bcfe7a6 100644
--- a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs
@@ -26,12 +26,15 @@ use crate::service::finalized_state::{
 
 // Common
 
-// TODO: turn this into a unit test, it has a fixed value
+/// This test has a fixed value, so testing it once is sufficient.
 #[test]
 fn roundtrip_unit_type() {
     let _init_guard = zebra_test::init();
 
-    proptest!(|(val in any::<()>())| assert_value_properties(val));
+    // The unit type `()` is serialized to the empty (zero-length) array `[]`.
+    #[allow(clippy::let_unit_value)]
+    let value = ();
+    assert_value_properties(value);
 }
 
 // Block
@@ -46,7 +49,7 @@ fn roundtrip_block_height() {
         // Limit the random height to the valid on-disk range.
         // Blocks outside this range are rejected before they reach the state.
         // (It would take decades to generate a valid chain this high.)
-            val = val.clamp(Height(0), MAX_ON_DISK_HEIGHT);
+            val.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
             assert_value_properties(val)
         }
     );
@@ -74,7 +77,7 @@ fn roundtrip_transaction_location() {
 
     proptest!(
         |(mut val in any::<TransactionLocation>())| {
-            val.height = val.height.clamp(Height(0), MAX_ON_DISK_HEIGHT);
+            val.height.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
             assert_value_properties(val)
         }
     );
@@ -142,7 +145,7 @@ fn roundtrip_output_location() {
 
     proptest!(
         |(mut val in any::<OutputLocation>())| {
-            *val.height_mut() = val.height().clamp(Height(0), MAX_ON_DISK_HEIGHT);
+            val.height_mut().0 %= MAX_ON_DISK_HEIGHT.0 + 1;
             assert_value_properties(val)
         }
     );
@@ -154,7 +157,7 @@ fn roundtrip_address_location() {
 
     proptest!(
         |(mut val in any::<AddressLocation>())| {
-            *val.height_mut() = val.height().clamp(Height(0), MAX_ON_DISK_HEIGHT);
+            val.height_mut().0 %= MAX_ON_DISK_HEIGHT.0 + 1;
             assert_value_properties(val)
         }
     );
@@ -166,7 +169,7 @@ fn roundtrip_address_balance_location() {
 
     proptest!(
        |(mut val in any::<AddressBalanceLocation>())| {
-            *val.height_mut() = val.address_location().height().clamp(Height(0), MAX_ON_DISK_HEIGHT);
+            val.height_mut().0 %= MAX_ON_DISK_HEIGHT.0 + 1;
             assert_value_properties(val)
         }
     );
@@ -185,8 +188,8 @@ fn roundtrip_address_unspent_output() {
 
     proptest!(
         |(mut val in any::<AddressUnspentOutput>())| {
-            *val.address_location_mut().height_mut() = val.address_location().height().clamp(Height(0), MAX_ON_DISK_HEIGHT);
-            *val.unspent_output_location_mut().height_mut() = val.unspent_output_location().height().clamp(Height(0), MAX_ON_DISK_HEIGHT);
+            val.address_location_mut().height_mut().0 %= MAX_ON_DISK_HEIGHT.0 + 1;
+            val.unspent_output_location_mut().height_mut().0 %= MAX_ON_DISK_HEIGHT.0 + 1;
 
             assert_value_properties(val)
         }
@@ -199,8 +202,8 @@ fn roundtrip_address_transaction() {
 
     proptest!(
         |(mut val in any::<AddressTransaction>())| {
-            *val.address_location_mut().height_mut() = val.address_location().height().clamp(Height(0), MAX_ON_DISK_HEIGHT);
-            val.transaction_location_mut().height = val.transaction_location().height.clamp(Height(0), MAX_ON_DISK_HEIGHT);
+            val.address_location_mut().height_mut().0 %= MAX_ON_DISK_HEIGHT.0 + 1;
+            val.transaction_location_mut().height.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
 
             assert_value_properties(val)
         }
@@ -376,7 +379,7 @@ fn roundtrip_sapling_subtree_data() {
     let _init_guard = zebra_test::init();
 
     proptest!(|(mut val in any::<NoteCommitmentSubtreeData<sapling::tree::Node>>())| {
-        val.end.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
+        val.end_height.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
         assert_value_properties(val)
     });
 }
@@ -461,8 +464,7 @@ fn roundtrip_orchard_subtree_data() {
     let _init_guard = zebra_test::init();
 
     proptest!(|(mut val in any::<NoteCommitmentSubtreeData<orchard::tree::Node>>())| {
-        val.end = val.end.clamp(Height(0), MAX_ON_DISK_HEIGHT);
-        val.end.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
+        val.end_height.0 %= MAX_ON_DISK_HEIGHT.0 + 1;
         assert_value_properties(val)
     });
 }
diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs
index 67b4f2ebb68..5866b08ddc2 100644
--- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs
@@ -27,7 +27,7 @@
 //!
 //! Test shielded data, and data activated in Overwinter and later network upgrades.
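The proptest hunks above replace `clamp` with a modulo when forcing random heights into the valid on-disk range. Both keep values in range, but clamping maps every out-of-range input onto the single endpoint, while the modulo spreads them across the whole range, so the tests still exercise many distinct heights. A toy demonstration, using a plain integer as a stand-in for `MAX_ON_DISK_HEIGHT`:

```rust
/// Stand-in for the highest height that fits in the on-disk format.
const MAX_ON_DISK: u32 = 0x00FF_FFFF;

fn main() {
    // Random inputs, some outside the valid range.
    let raw = [0u32, 42, MAX_ON_DISK, MAX_ON_DISK + 1, u32::MAX];

    // Clamping folds every out-of-range value onto the single endpoint...
    let clamped: Vec<u32> = raw.iter().map(|v| (*v).clamp(0, MAX_ON_DISK)).collect();
    assert_eq!(clamped[3], MAX_ON_DISK);
    assert_eq!(clamped[4], MAX_ON_DISK);

    // ...while the modulo rewrite keeps values in range but spread out.
    let wrapped: Vec<u32> = raw.iter().map(|v| v % (MAX_ON_DISK + 1)).collect();
    assert_eq!(wrapped[3], 0);
    assert_eq!(wrapped[4], MAX_ON_DISK);
    assert!(wrapped.iter().all(|v| *v <= MAX_ON_DISK));
}
```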
-use std::sync::Arc;
+use std::{collections::BTreeMap, sync::Arc};
 
 use zebra_chain::{
     block::Block,
@@ -37,11 +37,11 @@ use zebra_chain::{
 
 use crate::{
     service::finalized_state::{
-        disk_db::{DiskDb, DB},
-        disk_format::tests::KV,
+        disk_db::DiskDb,
+        disk_format::{tests::KV, RawBytes},
         FinalizedState,
     },
-    Config,
+    Config, ReadDisk,
 };
 
 /// Snapshot test for RocksDB column families, and their key-value data.
@@ -133,13 +132,12 @@ fn snapshot_raw_rocksdb_column_family_data(db: &DiskDb, original_cf_names: &[Str
         .expect("RocksDB API provides correct names");
 
     // Correctness: Multi-key iteration causes hangs in concurrent code, but seems ok in tests.
-    let mut cf_iter = db.full_iterator_cf(&cf_handle, rocksdb::IteratorMode::Start);
+    let cf_items: BTreeMap<RawBytes, RawBytes> = db.zs_items_in_range_ordered(&cf_handle, ..);
 
     // The default raw data serialization is very verbose, so we hex-encode the bytes.
-    let cf_data: Vec<KV> = cf_iter
-        .by_ref()
-        .map(|result| result.expect("unexpected database error"))
-        .map(|(key, value)| KV::new(key, value))
+    let cf_data: Vec<KV> = cf_items
+        .iter()
+        .map(|(key, value)| KV::new(key.raw_bytes(), value.raw_bytes()))
         .collect();
 
     if cf_name == "default" {
@@ -153,14 +152,6 @@ fn snapshot_raw_rocksdb_column_family_data(db: &DiskDb, original_cf_names: &[Str
         // because those roots are used to populate the anchor column families.
         insta::assert_ron_snapshot!(format!("{cf_name}_raw_data"), cf_data);
     }
-
-    let raw_cf_iter: rocksdb::DBRawIteratorWithThreadMode<DB> = cf_iter.into();
-
-    assert_eq!(
-        raw_cf_iter.status(),
-        Ok(()),
-        "unexpected column family iterator error",
-    );
 }
 
 insta::assert_ron_snapshot!("empty_column_families", empty_column_families);
diff --git a/zebra-state/src/service/finalized_state/disk_format/transparent.rs b/zebra-state/src/service/finalized_state/disk_format/transparent.rs
index d3fb01c390f..46879f39aaf 100644
--- a/zebra-state/src/service/finalized_state/disk_format/transparent.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/transparent.rs
@@ -2,8 +2,8 @@
 //!
 //! # Correctness
 //!
-//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must
-//! be incremented each time the database format (column, serialization, etc) changes.
+//! [`crate::constants::state_database_format_version_in_code()`] must be incremented
+//! each time the database format (column, serialization, etc) changes.
 
 use std::{cmp::max, fmt::Debug};
 
@@ -420,13 +420,13 @@ impl AddressTransaction {
     /// address. Starts at the first UTXO, or at the `query` start height, whichever is greater.
     /// Ends at the maximum possible transaction index for the end height.
     ///
-    /// Used to look up transactions with [`DiskDb::zs_range_iter`][1].
+    /// Used to look up transactions with [`DiskDb::zs_forward_range_iter`][1].
     ///
     /// The transaction locations in the:
     /// - start bound might be invalid, if it is based on the `query` start height.
     /// - end bound will always be invalid.
     ///
-    /// But this is not an issue, since [`DiskDb::zs_range_iter`][1] will fetch all existing
+    /// But this is not an issue, since [`DiskDb::zs_forward_range_iter`][1] will fetch all existing
     /// (valid) values in the range.
     ///
     /// [1]: super::super::disk_db::DiskDb
diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs
index 86a04553c50..7dc157b2f05 100644
--- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs
@@ -15,17 +15,13 @@ use zebra_chain::{
         task::{CheckForPanics, WaitForPanics},
         CodeTimer,
     },
-    parameters::Network,
 };
 
 use DbFormatChange::*;
 
 use crate::{
-    config::write_database_format_version_to_disk,
-    constants::{latest_version_for_adding_subtrees, DATABASE_FORMAT_VERSION},
-    database_format_version_in_code, database_format_version_on_disk,
+    constants::latest_version_for_adding_subtrees,
     service::finalized_state::{DiskWriteBatch, ZebraDb},
-    Config,
 };
 
 pub(crate) mod add_subtrees;
@@ -113,7 +109,9 @@ impl DbFormatChange {
     /// Also logs that change at info level.
     ///
     /// If `disk_version` is `None`, Zebra is creating a new database.
-    pub fn open_database(running_version: Version, disk_version: Option<Version>) -> Self {
+    pub fn open_database(running_version: &Version, disk_version: Option<Version>) -> Self {
+        let running_version = running_version.clone();
+
         let Some(disk_version) = disk_version else {
             info!(
                 %running_version,
@@ -160,8 +158,8 @@ impl DbFormatChange {
     /// This check makes sure the upgrade and new block code produce the same data.
     ///
     /// Also logs the check at info level.
-    pub fn check_new_blocks() -> Self {
-        let running_version = database_format_version_in_code();
+    pub fn check_new_blocks(db: &ZebraDb) -> Self {
+        let running_version = db.format_version_in_code();
 
         info!(%running_version, "checking new blocks were written in current database format");
         CheckNewBlocksCurrent { running_version }
@@ -219,14 +217,12 @@ impl DbFormatChange {
     /// Launch a `std::thread` that applies this format change to the database,
     /// then continues running to perform periodic format checks.
     ///
-    /// `initial_tip_height` is the database height when it was opened, and `upgrade_db` is the
+    /// `initial_tip_height` is the database height when it was opened, and `db` is the
     /// database instance to upgrade or check.
     pub fn spawn_format_change(
         self,
-        config: Config,
-        network: Network,
+        db: ZebraDb,
         initial_tip_height: Option<Height>,
-        upgrade_db: ZebraDb,
     ) -> DbFormatChangeThreadHandle {
         // # Correctness
         //
@@ -237,13 +233,7 @@ impl DbFormatChange {
         let span = Span::current();
         let update_task = thread::spawn(move || {
             span.in_scope(move || {
-                self.format_change_run_loop(
-                    config,
-                    network,
-                    initial_tip_height,
-                    upgrade_db,
-                    cancel_receiver,
-                )
+                self.format_change_run_loop(db, initial_tip_height, cancel_receiver)
             })
         });
 
@@ -264,21 +254,13 @@ impl DbFormatChange {
     /// newly added blocks matches the current format. It will run until it is cancelled or panics.
     fn format_change_run_loop(
         self,
-        config: Config,
-        network: Network,
+        db: ZebraDb,
         initial_tip_height: Option<Height>,
-        upgrade_db: ZebraDb,
         cancel_receiver: mpsc::Receiver<CancelFormatChange>,
     ) -> Result<(), CancelFormatChange> {
-        self.run_format_change_or_check(
-            &config,
-            network,
-            initial_tip_height,
-            &upgrade_db,
-            &cancel_receiver,
-        )?;
-
-        let Some(debug_validity_check_interval) = config.debug_validity_check_interval else {
+        self.run_format_change_or_check(&db, initial_tip_height, &cancel_receiver)?;
+
+        let Some(debug_validity_check_interval) = db.config().debug_validity_check_interval else {
             return Ok(());
         };
 
@@ -292,11 +274,9 @@ impl DbFormatChange {
                 return Err(CancelFormatChange);
             }
 
-            Self::check_new_blocks().run_format_change_or_check(
-                &config,
-                network,
+            Self::check_new_blocks(&db).run_format_change_or_check(
+                &db,
                 initial_tip_height,
-                &upgrade_db,
                 &cancel_receiver,
             )?;
         }
@@ -306,24 +286,16 @@ impl DbFormatChange {
     #[allow(clippy::unwrap_in_result)]
     pub(crate) fn run_format_change_or_check(
         &self,
-        config: &Config,
-        network: Network,
+        db: &ZebraDb,
         initial_tip_height: Option<Height>,
-        upgrade_db: &ZebraDb,
         cancel_receiver: &mpsc::Receiver<CancelFormatChange>,
     ) -> Result<(), CancelFormatChange> {
         match self {
             // Perform any required upgrades, then mark the state as upgraded.
-            Upgrade { .. } => self.apply_format_upgrade(
-                config,
-                network,
-                initial_tip_height,
-                upgrade_db,
-                cancel_receiver,
-            )?,
+            Upgrade { .. } => self.apply_format_upgrade(db, initial_tip_height, cancel_receiver)?,
 
             NewlyCreated { .. } => {
-                Self::mark_as_newly_created(config, network);
+                Self::mark_as_newly_created(db);
             }
 
             Downgrade { .. } => {
@@ -332,7 +304,7 @@ impl DbFormatChange {
                 // At the start of a format downgrade, the database must be marked as partially or
                 // fully downgraded. This lets newer Zebra versions know that some blocks with older
                 // formats have been added to the database.
-                Self::mark_as_downgraded(config, network);
+                Self::mark_as_downgraded(db);
 
                 // Older supported versions just assume they can read newer formats,
                 // because they can't predict all changes a newer Zebra version could make.
@@ -373,10 +345,10 @@ impl DbFormatChange {
        // (unless a future upgrade breaks these format checks)
        // - re-opening the current version should be valid, regardless of whether the upgrade
        //   or new block code created the format (or any combination).
-        Self::format_validity_checks_detailed(upgrade_db, cancel_receiver)?.unwrap_or_else(|_| {
+        Self::format_validity_checks_detailed(db, cancel_receiver)?.unwrap_or_else(|_| {
             panic!(
                 "unexpected invalid database format: delete and re-sync the database at '{:?}'",
-                upgrade_db.path()
+                db.path()
             )
         });
 
@@ -392,6 +364,8 @@ impl DbFormatChange {
         Ok(())
     }
 
+    // TODO: Move state-specific upgrade code to a finalized_state/* module.
+
     /// Apply any required format updates to the database.
     /// Format changes should be launched in an independent `std::thread`.
     ///
@@ -405,10 +379,8 @@ impl DbFormatChange {
     #[allow(clippy::unwrap_in_result)]
     fn apply_format_upgrade(
         &self,
-        config: &Config,
-        network: Network,
-        initial_tip_height: Option<Height>,
         db: &ZebraDb,
+        initial_tip_height: Option<Height>,
         cancel_receiver: &mpsc::Receiver<CancelFormatChange>,
     ) -> Result<(), CancelFormatChange> {
         let Upgrade {
@@ -430,7 +402,7 @@ impl DbFormatChange {
             "marking empty database as upgraded"
         );
 
-        Self::mark_as_upgraded_to(&database_format_version_in_code(), config, network);
+        Self::mark_as_upgraded_to(db, newer_running_version);
 
         info!(
             %newer_running_version,
@@ -510,7 +482,7 @@ impl DbFormatChange {
 
         // Mark the database as upgraded.
Zebra won't repeat the upgrade anymore once the // database is marked, so the upgrade MUST be complete at this point. - Self::mark_as_upgraded_to(&version_for_pruning_trees, config, network); + Self::mark_as_upgraded_to(db, &version_for_pruning_trees); timer.finish(module_path!(), line!(), "deduplicate trees upgrade"); } @@ -538,7 +510,7 @@ impl DbFormatChange { // Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the // database is marked, so the upgrade MUST be complete at this point. - Self::mark_as_upgraded_to(&latest_version_for_adding_subtrees, config, network); + Self::mark_as_upgraded_to(db, &latest_version_for_adding_subtrees); timer.finish(module_path!(), line!(), "add subtrees upgrade"); } @@ -564,7 +536,7 @@ impl DbFormatChange { // Mark the database as upgraded. Zebra won't repeat the upgrade anymore once the // database is marked, so the upgrade MUST be complete at this point. - Self::mark_as_upgraded_to(&version_for_tree_keys_and_caches, config, network); + Self::mark_as_upgraded_to(db, &version_for_tree_keys_and_caches); timer.finish(module_path!(), line!(), "tree keys and caches upgrade"); } @@ -721,12 +693,13 @@ impl DbFormatChange { /// # Panics /// /// If the format should not have been upgraded, because the database is not newly created. - fn mark_as_newly_created(config: &Config, network: Network) { - let disk_version = database_format_version_on_disk(config, network) + fn mark_as_newly_created(db: &ZebraDb) { + let running_version = db.format_version_in_code(); + let disk_version = db + .format_version_on_disk() .expect("unable to read database format version file path"); - let running_version = database_format_version_in_code(); - let default_new_version = Some(Version::new(DATABASE_FORMAT_VERSION, 0, 0)); + let default_new_version = Some(Version::new(running_version.major, 0, 0)); // The database version isn't empty any more, because we've created the RocksDB database // and acquired its lock. (If it is empty, we have a database locking bug.) @@ -737,7 +710,7 @@ impl DbFormatChange { running: {running_version}" ); - write_database_format_version_to_disk(&running_version, config, network) + db.update_format_version_on_disk(&running_version) .expect("unable to write database format version file to disk"); info!( @@ -768,11 +741,12 @@ impl DbFormatChange { /// - older or the same as the disk version /// (multiple upgrades to the same version are not allowed) /// - greater than the running version (that's a logic bug) - fn mark_as_upgraded_to(format_upgrade_version: &Version, config: &Config, network: Network) { - let disk_version = database_format_version_on_disk(config, network) + fn mark_as_upgraded_to(db: &ZebraDb, format_upgrade_version: &Version) { + let running_version = db.format_version_in_code(); + let disk_version = db + .format_version_on_disk() .expect("unable to read database format version file") .expect("tried to upgrade a newly created database"); - let running_version = database_format_version_in_code(); assert!( running_version > disk_version, @@ -798,7 +772,7 @@ impl DbFormatChange { running: {running_version}" ); - write_database_format_version_to_disk(format_upgrade_version, config, network) + db.update_format_version_on_disk(format_upgrade_version) .expect("unable to write database format version file to disk"); info!( @@ -826,11 +800,12 @@ impl DbFormatChange { /// If the state is newly created, because the running version should be the same. 
     ///
     /// Multiple downgrades are allowed, because they all downgrade to the same running version.
-    fn mark_as_downgraded(config: &Config, network: Network) {
-        let disk_version = database_format_version_on_disk(config, network)
+    fn mark_as_downgraded(db: &ZebraDb) {
+        let running_version = db.format_version_in_code();
+        let disk_version = db
+            .format_version_on_disk()
             .expect("unable to read database format version file")
             .expect("can't downgrade a newly created database");
-        let running_version = database_format_version_in_code();
 
         assert!(
             disk_version >= running_version,
@@ -839,7 +814,7 @@ impl DbFormatChange {
             running: {running_version}"
         );
 
-        write_database_format_version_to_disk(&running_version, config, network)
+        db.update_format_version_on_disk(&running_version)
             .expect("unable to write database format version file to disk");
 
         info!(
@@ -883,7 +858,7 @@ impl DbFormatChangeThreadHandle {
     ///
     /// This method should be called regularly, so that panics are detected as soon as possible.
     pub fn check_for_panics(&mut self) {
-        self.update_task.check_for_panics();
+        self.update_task.panic_if_task_has_panicked();
     }
 
     /// Wait for the spawned thread to finish. If it exited with a panic, resume that panic.
diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs
index 72dc4d55c90..d84392ebf84 100644
--- a/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs
+++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/add_subtrees.rs
@@ -171,11 +171,11 @@ fn first_sapling_mainnet_subtree() -> NoteCommitmentSubtree<sapling::tree::Node>
     // ```
     NoteCommitmentSubtree {
         index: 0.into(),
-        node: hex!("754bb593ea42d231a7ddf367640f09bbf59dc00f2c1d2003cc340e0c016b5b13")
+        root: hex!("754bb593ea42d231a7ddf367640f09bbf59dc00f2c1d2003cc340e0c016b5b13")
             .as_slice()
             .try_into()
             .expect("test vector is valid"),
-        end: Height(558822),
+        end_height: Height(558822),
     }
 }
 
@@ -187,11 +187,11 @@ fn first_orchard_mainnet_subtree() -> NoteCommitmentSubtree<orchard::tree::Node>
     // ```
     NoteCommitmentSubtree {
         index: 0.into(),
-        node: hex!("d4e323b3ae0cabfb6be4087fec8c66d9a9bbfc354bf1d9588b6620448182063b")
+        root: hex!("d4e323b3ae0cabfb6be4087fec8c66d9a9bbfc354bf1d9588b6620448182063b")
            .as_slice()
            .try_into()
            .expect("test vector is valid"),
-        end: Height(1707429),
+        end_height: Height(1707429),
     }
 }
 
@@ -360,9 +360,9 @@ fn check_sapling_subtrees(
     };
 
     // Check that there was a sapling note at the subtree's end height.
-    let Some(tree) = db.sapling_tree_by_height(&subtree.end) else {
+    let Some(tree) = db.sapling_tree_by_height(&subtree.end_height) else {
         result = Err("missing note commitment tree at subtree completion height");
-        error!(?result, ?subtree.end);
+        error!(?result, ?subtree.end_height);
         continue;
     };
 
@@ -373,7 +373,7 @@ fn check_sapling_subtrees(
         error!(?result);
     }
 
-    if subtree.node != node {
+    if subtree.root != node {
         result = Err("completed subtree roots should match");
         error!(?result);
     }
 
    // Check that the final note has a greater subtree index if it didn't complete a subtree.
else { let prev_height = subtree - .end + .end_height .previous() .expect("Note commitment subtrees should not end at the minimal height."); let Some(prev_tree) = db.sapling_tree_by_height(&prev_height) else { result = Err("missing note commitment tree below subtree completion height"); - error!(?result, ?subtree.end); + error!(?result, ?subtree.end_height); continue; }; @@ -430,15 +430,15 @@ fn check_sapling_subtrees( }; // Check that the subtree end height matches that in the sapling trees. - if subtree.end != height { + if subtree.end_height != height { let is_complete = tree.is_complete_subtree(); result = Err("bad sapling subtree end height"); - error!(?result, ?subtree.end, ?height, ?index, ?is_complete, ); + error!(?result, ?subtree.end_height, ?height, ?index, ?is_complete, ); } // Check the root if the sapling note commitment tree at this height is a complete subtree. if let Some((_index, node)) = tree.completed_subtree_index_and_root() { - if subtree.node != node { + if subtree.root != node { result = Err("completed subtree roots should match"); error!(?result); } @@ -490,9 +490,9 @@ fn check_orchard_subtrees( }; // Check that there was a orchard note at the subtree's end height. - let Some(tree) = db.orchard_tree_by_height(&subtree.end) else { + let Some(tree) = db.orchard_tree_by_height(&subtree.end_height) else { result = Err("missing note commitment tree at subtree completion height"); - error!(?result, ?subtree.end); + error!(?result, ?subtree.end_height); continue; }; @@ -503,7 +503,7 @@ fn check_orchard_subtrees( error!(?result); } - if subtree.node != node { + if subtree.root != node { result = Err("completed subtree roots should match"); error!(?result); } @@ -511,13 +511,13 @@ fn check_orchard_subtrees( // Check that the final note has a greater subtree index if it didn't complete a subtree. else { let prev_height = subtree - .end + .end_height .previous() .expect("Note commitment subtrees should not end at the minimal height."); let Some(prev_tree) = db.orchard_tree_by_height(&prev_height) else { result = Err("missing note commitment tree below subtree completion height"); - error!(?result, ?subtree.end); + error!(?result, ?subtree.end_height); continue; }; @@ -560,15 +560,15 @@ fn check_orchard_subtrees( }; // Check that the subtree end height matches that in the orchard trees. - if subtree.end != height { + if subtree.end_height != height { let is_complete = tree.is_complete_subtree(); result = Err("bad orchard subtree end height"); - error!(?result, ?subtree.end, ?height, ?index, ?is_complete, ); + error!(?result, ?subtree.end_height, ?height, ?index, ?is_complete, ); } // Check the root if the orchard note commitment tree at this height is a complete subtree. if let Some((_index, node)) = tree.completed_subtree_index_and_root() { - if subtree.node != node { + if subtree.root != node { result = Err("completed subtree roots should match"); error!(?result); } @@ -851,10 +851,10 @@ fn write_sapling_subtree( .expect("writing sapling note commitment subtrees should always succeed."); if subtree.index.0 % 100 == 0 { - info!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added sapling subtree"); + info!(end_height = ?subtree.end_height, index = ?subtree.index.0, "calculated and added sapling subtree"); } // This log happens about once per second on recent machines with SSD disks. 
- debug!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added sapling subtree"); + debug!(end_height = ?subtree.end_height, index = ?subtree.index.0, "calculated and added sapling subtree"); } /// Writes an Orchard note commitment subtree to `upgrade_db`. @@ -871,8 +871,8 @@ fn write_orchard_subtree( .expect("writing orchard note commitment subtrees should always succeed."); if subtree.index.0 % 100 == 0 { - info!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added orchard subtree"); + info!(end_height = ?subtree.end_height, index = ?subtree.index.0, "calculated and added orchard subtree"); } // This log happens about once per second on recent machines with SSD disks. - debug!(end_height = ?subtree.end, index = ?subtree.index.0, "calculated and added orchard subtree"); + debug!(end_height = ?subtree.end_height, index = ?subtree.index.0, "calculated and added orchard subtree"); } diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs index 069d9cb4c2b..4bcd5d8cd4c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs @@ -28,11 +28,6 @@ pub fn run( // Writing the trees back to the database automatically updates their format. let mut batch = DiskWriteBatch::new(); - // Delete the previous `Height` tip key format, which is now a duplicate. - // It's ok to do a full delete, because the trees are restored before the batch is written. - batch.delete_range_sprout_tree(upgrade_db, &Height(0), &MAX_ON_DISK_HEIGHT); - batch.delete_range_history_tree(upgrade_db, &Height(0), &MAX_ON_DISK_HEIGHT); - // Update the sprout tip key format in the database. batch.update_sprout_tree(upgrade_db, &sprout_tip_tree); batch.update_history_tree(upgrade_db, &history_tip_tree); @@ -46,6 +41,24 @@ pub fn run( .write_batch(batch) .expect("updating tree key formats should always succeed"); + // The deletes below can be slow due to tombstones for previously deleted keys, + // so we do it in a separate batch to avoid data races with syncing (#7961). + let mut batch = DiskWriteBatch::new(); + + // Delete the previous `Height` tip key format, which is now a duplicate. + // This doesn't delete the new `()` key format, because it serializes to an empty array. + batch.delete_range_sprout_tree(upgrade_db, &Height(0), &MAX_ON_DISK_HEIGHT); + batch.delete_range_history_tree(upgrade_db, &Height(0), &MAX_ON_DISK_HEIGHT); + + // Return before we write if the upgrade is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + upgrade_db + .write_batch(batch) + .expect("cleaning up old tree key formats should always succeed"); + Ok(()) } diff --git a/zebra-state/src/service/finalized_state/zebra_db.rs b/zebra-state/src/service/finalized_state/zebra_db.rs index c370c980f5f..810feff878e 100644 --- a/zebra-state/src/service/finalized_state/zebra_db.rs +++ b/zebra-state/src/service/finalized_state/zebra_db.rs @@ -6,18 +6,19 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must -//! be incremented each time the database format (column, serialization, etc) changes. +//! [`crate::constants::state_database_format_version_in_code()`] must be incremented +//! each time the database format (column, serialization, etc) changes. 
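The two-batch split in `fix_tree_key_type.rs` above relies on a cancellation check between writes, so a node shutdown can interrupt a slow upgrade without leaving a batch half-applied. A minimal self-contained sketch of that check, assuming an `mpsc` channel carrying a `CancelFormatChange` marker as in the hunk above (the marker type is redefined here so the sketch compiles on its own):

```rust
use std::sync::mpsc;

/// Marker for a cancelled format change (stands in for the
/// `CancelFormatChange` type used in the hunk above).
struct CancelFormatChange;

/// Returns `Ok(())` while the upgrade should keep running: the cancel
/// channel is empty and its sender is still connected. A received message
/// or a dropped sender both cancel the upgrade, matching
/// `!matches!(try_recv(), Err(TryRecvError::Empty))` in the hunk above.
fn check_cancelled(
    cancel_receiver: &mpsc::Receiver<CancelFormatChange>,
) -> Result<(), CancelFormatChange> {
    if matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) {
        Ok(())
    } else {
        Err(CancelFormatChange)
    }
}
```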
use std::{ path::Path, sync::{mpsc, Arc}, }; +use semver::Version; use zebra_chain::parameters::Network; use crate::{ - config::{database_format_version_in_code, database_format_version_on_disk}, + config::database_format_version_on_disk, service::finalized_state::{ disk_db::DiskDb, disk_format::{ @@ -25,7 +26,7 @@ use crate::{ upgrade::{DbFormatChange, DbFormatChangeThreadHandle}, }, }, - Config, + write_database_format_version_to_disk, BoxError, Config, }; pub mod block; @@ -34,10 +35,11 @@ pub mod metrics; pub mod shielded; pub mod transparent; -#[cfg(any(test, feature = "proptest-impl"))] +#[cfg(any(test, feature = "proptest-impl", feature = "shielded-scan"))] +// TODO: when the database is split out of zebra-state, always expose these methods. pub mod arbitrary; -/// Wrapper struct to ensure high-level typed database access goes through the correct API. +/// Wrapper struct to ensure high-level `zebra-state` database access goes through the correct API. /// /// `rocksdb` allows concurrent writes through a shared reference, /// so database instances are cloneable. When the final clone is dropped, @@ -51,11 +53,13 @@ pub struct ZebraDb { // /// The configuration for the database. // - // TODO: use the database and version paths instead, and refactor the upgrade code to use paths + // TODO: move the config to DiskDb config: Arc, /// Should format upgrades and format checks be skipped for this instance? /// Only used in test code. + // + // TODO: move this to DiskDb debug_skip_format_upgrades: bool, // Owned State @@ -68,6 +72,8 @@ pub struct ZebraDb { /// /// This field should be dropped before the database field, so the format upgrade task is /// cancelled before the database is dropped. This helps avoid some kinds of deadlocks. + // + // TODO: move the generic upgrade code and fields to DiskDb format_change_handle: Option, /// The inner low-level database wrapper for the RocksDB database. @@ -75,18 +81,43 @@ pub struct ZebraDb { } impl ZebraDb { - /// Opens or creates the database at `config.path` for `network`, + /// Opens or creates the database at a path based on the kind, major version and network, + /// with the supplied column families, preserving any existing column families, /// and returns a shared high-level typed database wrapper. /// /// If `debug_skip_format_upgrades` is true, don't do any format upgrades or format checks. /// This argument is only used when running tests, it is ignored in production code. - pub fn new(config: &Config, network: Network, debug_skip_format_upgrades: bool) -> ZebraDb { - let running_version = database_format_version_in_code(); - let disk_version = database_format_version_on_disk(config, network) - .expect("unable to read database format version file"); + // + // TODO: rename to StateDb and remove the db_kind and column_families_in_code arguments + pub fn new( + config: &Config, + db_kind: impl AsRef, + format_version_in_code: &Version, + network: Network, + debug_skip_format_upgrades: bool, + column_families_in_code: impl IntoIterator, + read_only: bool, + ) -> ZebraDb { + let disk_version = database_format_version_on_disk( + config, + &db_kind, + format_version_in_code.major, + network, + ) + .expect("unable to read database format version file"); // Log any format changes before opening the database, in case opening fails. 
- let format_change = DbFormatChange::open_database(running_version, disk_version);
+ let format_change = DbFormatChange::open_database(format_version_in_code, disk_version);
+
+ // Format upgrades try to write to the database, so we always skip them if `read_only` is
+ // `true`.
+ //
+ // We allow skipping the upgrades by the scanner because it doesn't support them yet and we
+ // also allow skipping them when we are running tests.
+ //
+ // TODO: Make scanner support format upgrades, then remove `shielded-scan` here.
+ let debug_skip_format_upgrades = read_only
+ || ((cfg!(test) || cfg!(feature = "shielded-scan")) && debug_skip_format_upgrades);
// Open the database and do initial checks.
let mut db = ZebraDb {
@@ -97,23 +128,24 @@ impl ZebraDb {
// changes to the default database version. Then we set the correct version in the
// upgrade thread. We need to do the version change in this order, because the version
// file can only be changed while we hold the RocksDB database lock.
- db: DiskDb::new(config, network),
+ db: DiskDb::new(
+ config,
+ db_kind,
+ format_version_in_code,
+ network,
+ column_families_in_code,
+ read_only,
+ ),
};
- db.spawn_format_change(config, network, format_change);
+ db.spawn_format_change(format_change);
db
}
/// Launch any required format changes or format checks, and store their thread handle.
- pub fn spawn_format_change(
- &mut self,
- config: &Config,
- network: Network,
- format_change: DbFormatChange,
- ) {
- // Always do format upgrades & checks in production code.
- if cfg!(test) && self.debug_skip_format_upgrades {
+ pub fn spawn_format_change(&mut self, format_change: DbFormatChange) {
+ if self.debug_skip_format_upgrades {
return;
}
@@ -128,18 +160,59 @@ impl ZebraDb {
// TODO:
// - should debug_stop_at_height wait for the upgrade task to finish?
- // - if needed, make upgrade_db into a FinalizedState,
- // or move the FinalizedState methods needed for upgrades to ZebraDb.
- let format_change_handle = format_change.spawn_format_change(
- config.clone(),
- network,
- initial_tip_height,
- upgrade_db,
- );
+ let format_change_handle =
+ format_change.spawn_format_change(upgrade_db, initial_tip_height);
self.format_change_handle = Some(format_change_handle);
}
+ /// Returns config for this database.
+ pub fn config(&self) -> &Config {
+ &self.config
+ }
+
+ /// Returns the configured database kind for this database.
+ pub fn db_kind(&self) -> String {
+ self.db.db_kind()
+ }
+
+ /// Returns the format version of the running code that created this `ZebraDb` instance in memory.
+ pub fn format_version_in_code(&self) -> Version {
+ self.db.format_version_in_code()
+ }
+
+ /// Returns the fixed major version for this database.
+ pub fn major_version(&self) -> u64 {
+ self.db.major_version()
+ }
+
+ /// Returns the format version of this database on disk.
+ ///
+ /// See `database_format_version_on_disk()` for details.
+ pub fn format_version_on_disk(&self) -> Result<Option<Version>, BoxError> {
+ database_format_version_on_disk(
+ self.config(),
+ self.db_kind(),
+ self.major_version(),
+ self.network(),
+ )
+ }
+
+ /// Updates the format of this database on disk to the supplied version.
+ ///
+ /// See `write_database_format_version_to_disk()` for details.
+ pub(crate) fn update_format_version_on_disk(
+ &self,
+ new_version: &Version,
+ ) -> Result<(), BoxError> {
+ write_database_format_version_to_disk(
+ self.config(),
+ self.db_kind(),
+ new_version,
+ self.network(),
+ )
+ }
+
/// Returns the configured network for this database.
pub fn network(&self) -> Network { self.db.network() @@ -168,13 +241,16 @@ impl ZebraDb { /// /// See [`DiskDb::shutdown`] for details. pub fn shutdown(&mut self, force: bool) { + // Are we shutting down the underlying database instance? + let is_shutdown = force || self.db.shared_database_owners() <= 1; + // # Concurrency // // The format upgrade task should be cancelled before the database is flushed or shut down. // This helps avoid some kinds of deadlocks. // // See also the correctness note in `DiskDb::shutdown()`. - if force || self.db.shared_database_owners() <= 1 { + if !self.debug_skip_format_upgrades && is_shutdown { if let Some(format_change_handle) = self.format_change_handle.as_mut() { format_change_handle.force_cancel(); } @@ -191,8 +267,13 @@ impl ZebraDb { // which would then make unrelated PRs fail when Zebra starts up. // If the upgrade has completed, or we've done a downgrade, check the state is valid. - let disk_version = database_format_version_on_disk(&self.config, self.network()) - .expect("unexpected invalid or unreadable database version file"); + let disk_version = database_format_version_on_disk( + &self.config, + self.db_kind(), + self.major_version(), + self.network(), + ) + .expect("unexpected invalid or unreadable database version file"); if let Some(disk_version) = disk_version { // We need to keep the cancel handle until the format check has finished, @@ -201,14 +282,12 @@ impl ZebraDb { // We block here because the checks are quick and database validity is // consensus-critical. - if disk_version >= database_format_version_in_code() { - DbFormatChange::check_new_blocks() + if disk_version >= self.db.format_version_in_code() { + DbFormatChange::check_new_blocks(self) .run_format_change_or_check( - &self.config, - self.network(), - // This argument is not used by the format check. - None, self, + // The initial tip height is not used by the new blocks format check. + None, &never_cancel_receiver, ) .expect("cancel handle is never used"); diff --git a/zebra-state/src/service/finalized_state/zebra_db/arbitrary.rs b/zebra-state/src/service/finalized_state/zebra_db/arbitrary.rs index 2b9f03526a8..8a83a2894c5 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/arbitrary.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/arbitrary.rs @@ -1,5 +1,7 @@ //! Arbitrary value generation and test harnesses for high-level typed database access. +#![allow(unused_imports)] + use std::ops::Deref; use zebra_chain::{amount::NonNegative, block::Block, sprout, value_balance::ValueBalance}; @@ -21,13 +23,14 @@ impl Deref for ZebraDb { impl ZebraDb { /// Returns the inner database. /// - /// This is a test-only method, because it allows write access + /// This is a test-only and shielded-scan-only method, because it allows write access /// and raw read access to the RocksDB instance. pub fn db(&self) -> &DiskDb { &self.db } /// Allow to set up a fake value pool in the database for testing purposes. + #[cfg(any(test, feature = "proptest-impl"))] pub fn set_finalized_value_pool(&self, fake_value_pool: ValueBalance) { let mut batch = DiskWriteBatch::new(); let value_pool_cf = self.db().cf_handle("tip_chain_value_pool").unwrap(); @@ -38,6 +41,7 @@ impl ZebraDb { /// Artificially prime the note commitment tree anchor sets with anchors /// referenced in a block, for testing purposes _only_. 
+ #[cfg(any(test, feature = "proptest-impl"))] pub fn populate_with_anchors(&self, block: &Block) { let mut batch = DiskWriteBatch::new(); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 0a1a49e72cb..0a5a7b5faa6 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -6,8 +6,8 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must -//! be incremented each time the database format (column, serialization, etc) changes. +//! [`crate::constants::state_database_format_version_in_code()`] must be incremented +//! each time the database format (column, serialization, etc) changes. use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -30,7 +30,7 @@ use zebra_chain::{ }; use crate::{ - request::SemanticallyVerifiedBlockWithTrees, + request::FinalizedBlock, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ @@ -39,7 +39,7 @@ use crate::{ }, zebra_db::{metrics::block_precommit_metrics, ZebraDb}, }, - BoxError, HashOrHeight, SemanticallyVerifiedBlock, + BoxError, HashOrHeight, }; #[cfg(test)] @@ -292,13 +292,12 @@ impl ZebraDb { /// - Propagates any errors from updating history and note commitment trees pub(in super::super) fn write_block( &mut self, - finalized: SemanticallyVerifiedBlockWithTrees, + finalized: FinalizedBlock, prev_note_commitment_trees: Option, network: Network, source: &str, ) -> Result { let tx_hash_indexes: HashMap = finalized - .verified .transaction_hashes .iter() .enumerate() @@ -311,12 +310,11 @@ impl ZebraDb { // simplify the spent_utxos location lookup code, // and remove the extra new_outputs_by_out_loc argument let new_outputs_by_out_loc: BTreeMap = finalized - .verified .new_outputs .iter() .map(|(outpoint, ordered_utxo)| { ( - lookup_out_loc(finalized.verified.height, outpoint, &tx_hash_indexes), + lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes), ordered_utxo.utxo.clone(), ) }) @@ -325,7 +323,6 @@ impl ZebraDb { // Get a list of the spent UTXOs, before we delete any from the database let spent_utxos: Vec<(transparent::OutPoint, OutputLocation, transparent::Utxo)> = finalized - .verified .block .transactions .iter() @@ -337,13 +334,12 @@ impl ZebraDb { // Some utxos are spent in the same block, so they will be in // `tx_hash_indexes` and `new_outputs` self.output_location(&outpoint).unwrap_or_else(|| { - lookup_out_loc(finalized.verified.height, &outpoint, &tx_hash_indexes) + lookup_out_loc(finalized.height, &outpoint, &tx_hash_indexes) }), self.utxo(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo) .or_else(|| { finalized - .verified .new_outputs .get(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo.clone()) @@ -368,7 +364,6 @@ impl ZebraDb { .values() .chain( finalized - .verified .new_outputs .values() .map(|ordered_utxo| &ordered_utxo.utxo), @@ -403,11 +398,11 @@ impl ZebraDb { tracing::trace!(?source, "committed block from"); - Ok(finalized.verified.hash) + Ok(finalized.hash) } /// Writes the given batch to the database. 
- pub(crate) fn write_batch(&self, batch: DiskWriteBatch) -> Result<(), rocksdb::Error> { + pub fn write_batch(&self, batch: DiskWriteBatch) -> Result<(), rocksdb::Error> { self.db.write(batch) } } @@ -446,7 +441,7 @@ impl DiskWriteBatch { &mut self, zebra_db: &ZebraDb, network: Network, - finalized: &SemanticallyVerifiedBlockWithTrees, + finalized: &FinalizedBlock, new_outputs_by_out_loc: BTreeMap, spent_utxos_by_outpoint: HashMap, spent_utxos_by_out_loc: BTreeMap, @@ -455,8 +450,9 @@ impl DiskWriteBatch { prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { let db = &zebra_db.db; + // Commit block, transaction, and note commitment tree data. - self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?; + self.prepare_block_header_and_transaction_data_batch(db, finalized)?; // The consensus rules are silent on shielded transactions in the genesis block, // because there aren't any in the mainnet or testnet genesis blocks. @@ -464,7 +460,7 @@ impl DiskWriteBatch { // which is already present from height 1 to the first shielded transaction. // // In Zebra we include the nullifiers and note commitments in the genesis block because it simplifies our code. - self.prepare_shielded_transaction_batch(db, &finalized.verified)?; + self.prepare_shielded_transaction_batch(db, finalized)?; self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?; // # Consensus @@ -477,12 +473,12 @@ impl DiskWriteBatch { // So we ignore the genesis UTXO, transparent address index, and value pool updates // for the genesis block. This also ignores genesis shielded value pool updates, but there // aren't any of those on mainnet or testnet. - if !finalized.verified.height.is_min() { + if !finalized.height.is_min() { // Commit transaction indexes self.prepare_transparent_transaction_batch( db, network, - &finalized.verified, + finalized, &new_outputs_by_out_loc, &spent_utxos_by_outpoint, &spent_utxos_by_out_loc, @@ -491,19 +487,15 @@ impl DiskWriteBatch { // Commit UTXOs and value pools self.prepare_chain_value_pools_batch( - db, - &finalized.verified, + zebra_db, + finalized, spent_utxos_by_outpoint, value_pool, )?; } // The block has passed contextual validation, so update the metrics - block_precommit_metrics( - &finalized.verified.block, - finalized.verified.hash, - finalized.verified.height, - ); + block_precommit_metrics(&finalized.block, finalized.hash, finalized.height); Ok(()) } @@ -518,7 +510,7 @@ impl DiskWriteBatch { pub fn prepare_block_header_and_transaction_data_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, + finalized: &FinalizedBlock, ) -> Result<(), BoxError> { // Blocks let block_header_by_height = db.cf_handle("block_header_by_height").unwrap(); @@ -530,7 +522,7 @@ impl DiskWriteBatch { let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap(); let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap(); - let SemanticallyVerifiedBlock { + let FinalizedBlock { block, hash, height, diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs index 93db70f19a1..f0ce0d0f414 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs @@ -26,7 +26,9 @@ use zebra_chain::{ use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS}; use crate::{ - service::finalized_state::{disk_db::DiskWriteBatch, ZebraDb}, + 
constants::{state_database_format_version_in_code, STATE_DATABASE_KIND},
+ request::{FinalizedBlock, Treestate},
+ service::finalized_state::{disk_db::DiskWriteBatch, ZebraDb, STATE_COLUMN_FAMILIES_IN_CODE},
CheckpointVerifiedBlock, Config,
};
@@ -79,9 +81,15 @@ fn test_block_db_round_trip_with(
let state = ZebraDb::new(
&Config::ephemeral(),
+ STATE_DATABASE_KIND,
+ &state_database_format_version_in_code(),
network,
// The raw database accesses in this test create invalid database formats.
true,
+ STATE_COLUMN_FAMILIES_IN_CODE
+ .iter()
+ .map(ToString::to_string),
+ false,
);
// Check that each block round-trips to the database
@@ -108,7 +116,7 @@ fn test_block_db_round_trip_with(
// Now, use the database
let original_block = Arc::new(original_block);
- let finalized = if original_block.coinbase_height().is_some() {
+ let checkpoint_verified = if original_block.coinbase_height().is_some() {
original_block.clone().into()
} else {
// Fake a zero height
@@ -119,6 +127,10 @@ fn test_block_db_round_trip_with(
)
};
+ let dummy_treestate = Treestate::default();
+ let finalized =
+ FinalizedBlock::from_checkpoint_verified(checkpoint_verified, dummy_treestate);
+
// Skip validation by writing the block directly to the database
let mut batch = DiskWriteBatch::new();
batch
diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs
index 925762ce476..842fc9453c6 100644
--- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs
+++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs
@@ -8,26 +8,90 @@
//!
//! # Correctness
//!
-//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must
-//! be incremented each time the database format (column, serialization, etc) changes.
+//! [`crate::constants::state_database_format_version_in_code()`] must be incremented
+//! each time the database format (column, serialization, etc) changes.
-use std::{borrow::Borrow, collections::HashMap, sync::Arc};
+use std::{
+ borrow::Borrow,
+ collections::{BTreeMap, HashMap},
+ sync::Arc,
+};
use zebra_chain::{
- amount::NonNegative, block::Height, history_tree::HistoryTree, transparent,
+ amount::NonNegative,
+ block::Height,
+ history_tree::{HistoryTree, NonEmptyHistoryTree},
+ transparent,
value_balance::ValueBalance,
};
use crate::{
+ request::FinalizedBlock,
service::finalized_state::{
- disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
- disk_format::RawBytes,
- zebra_db::ZebraDb,
+ disk_db::DiskWriteBatch, disk_format::RawBytes, zebra_db::ZebraDb, TypedColumnFamily,
},
- BoxError, SemanticallyVerifiedBlock,
+ BoxError,
};
+/// The name of the History Tree column family.
+///
+/// This constant should be used so the compiler can detect typos.
+pub const HISTORY_TREE: &str = "history_tree";
+
+/// The type for reading history trees from the database.
+///
+/// This type should be used so the compiler can detect incorrectly typed accesses to the
+/// column family.
+pub type HistoryTreeCf<'cf> = TypedColumnFamily<'cf, (), NonEmptyHistoryTree>;
+
+/// The legacy (1.3.0 and earlier) type for reading history trees from the database.
+/// This type should not be used in new code.
+pub type LegacyHistoryTreeCf<'cf> = TypedColumnFamily<'cf, Height, NonEmptyHistoryTree>;
+
+/// A generic raw key type for reading history trees from the database, regardless of the database version.
+/// This type should not be used in new code.
+pub type RawHistoryTreeCf<'cf> = TypedColumnFamily<'cf, RawBytes, NonEmptyHistoryTree>;
+
+/// The name of the chain value pools column family.
+///
+/// This constant should be used so the compiler can detect typos.
+pub const CHAIN_VALUE_POOLS: &str = "tip_chain_value_pool";
+
+/// The type for reading value pools from the database.
+///
+/// This type should be used so the compiler can detect incorrectly typed accesses to the
+/// column family.
+pub type ChainValuePoolsCf<'cf> = TypedColumnFamily<'cf, (), ValueBalance<NonNegative>>;
+
impl ZebraDb {
+ // Column family convenience methods
+
+ /// Returns a typed handle to the `history_tree` column family.
+ pub(crate) fn history_tree_cf(&self) -> HistoryTreeCf {
+ HistoryTreeCf::new(&self.db, HISTORY_TREE)
+ .expect("column family was created when database was created")
+ }
+
+ /// Returns a legacy typed handle to the `history_tree` column family.
+ /// This should not be used in new code.
+ pub(crate) fn legacy_history_tree_cf(&self) -> LegacyHistoryTreeCf {
+ LegacyHistoryTreeCf::new(&self.db, HISTORY_TREE)
+ .expect("column family was created when database was created")
+ }
+
+ /// Returns a generic raw key typed handle to the `history_tree` column family.
+ /// This should not be used in new code.
+ pub(crate) fn raw_history_tree_cf(&self) -> RawHistoryTreeCf {
+ RawHistoryTreeCf::new(&self.db, HISTORY_TREE)
+ .expect("column family was created when database was created")
+ }
+
+ /// Returns a typed handle to the chain value pools column family.
+ pub(crate) fn chain_value_pools_cf(&self) -> ChainValuePoolsCf {
+ ChainValuePoolsCf::new(&self.db, CHAIN_VALUE_POOLS)
+ .expect("column family was created when database was created")
+ }
+
// History tree methods
/// Returns the ZIP-221 history tree of the finalized tip.
@@ -35,11 +99,7 @@ impl ZebraDb {
/// If history trees have not been activated yet (pre-Heartwood), or the state is empty,
/// returns an empty history tree.
pub fn history_tree(&self) -> Arc<HistoryTree> {
- if self.is_empty() {
- return Arc::<HistoryTree>::default();
- }
-
- let history_tree_cf = self.db.cf_handle("history_tree").unwrap();
+ let history_tree_cf = self.history_tree_cf();
// # Backwards Compatibility
//
@@ -55,36 +115,42 @@ impl ZebraDb {
//
// So we use the empty key `()`. Since the key has a constant value, we will always read
// the latest tree.
- let mut history_tree: Option<Arc<HistoryTree>> = self.db.zs_get(&history_tree_cf, &());
+ let mut history_tree = history_tree_cf.zs_get(&());
if history_tree.is_none() {
- let tip_height = self
- .finalized_tip_height()
- .expect("just checked for an empty database");
-
- history_tree = self.db.zs_get(&history_tree_cf, &tip_height);
+ let legacy_history_tree_cf = self.legacy_history_tree_cf();
+
+ // In Zebra 1.4.0 and later, we only update the history tip tree when it has changed (for every block after Heartwood).
+ // But we write with a `()` key, not a height key.
+ // So we need to look for the most recent update height if the `()` key has never been written.
+ history_tree = legacy_history_tree_cf
+ .zs_last_key_value()
+ .map(|(_height_key, tree_value)| tree_value);
}
- history_tree.unwrap_or_default()
+ Arc::new(HistoryTree::from(history_tree))
}
/// Returns all the history tip trees.
- /// We only store the history tree for the tip, so this method is mainly used in tests.
- pub fn history_trees_full_tip(
- &self,
- ) -> impl Iterator<Item = (RawBytes, Arc<HistoryTree>)> + '_ {
- let history_tree_cf = self.db.cf_handle("history_tree").unwrap();
-
- self.db.zs_range_iter(&history_tree_cf, .., false)
+ /// We only store the history tree for the tip, so this method is only used in tests and
+ /// upgrades.
+ pub(crate) fn history_trees_full_tip(&self) -> BTreeMap<RawBytes, Arc<HistoryTree>> {
+ let raw_history_tree_cf = self.raw_history_tree_cf();
+
+ raw_history_tree_cf
+ .zs_forward_range_iter(..)
+ .map(|(raw_key, history_tree)| (raw_key, Arc::new(HistoryTree::from(history_tree))))
+ .collect()
}
// Value pool methods
/// Returns the stored `ValueBalance` for the best chain at the finalized tip height.
pub fn finalized_value_pool(&self) -> ValueBalance<NonNegative> {
- let value_pool_cf = self.db.cf_handle("tip_chain_value_pool").unwrap();
- self.db
- .zs_get(&value_pool_cf, &())
+ let chain_value_pools_cf = self.chain_value_pools_cf();
+
+ chain_value_pools_cf
+ .zs_get(&())
.unwrap_or_else(ValueBalance::zero)
}
}
@@ -93,11 +159,14 @@ impl DiskWriteBatch {
// History tree methods
/// Updates the history tree for the tip, if it is not empty.
- pub fn update_history_tree(&mut self, zebra_db: &ZebraDb, tree: &HistoryTree) {
- let history_tree_cf = zebra_db.db.cf_handle("history_tree").unwrap();
+ ///
+ /// The batch must be written to the database by the caller.
+ pub fn update_history_tree(&mut self, db: &ZebraDb, tree: &HistoryTree) {
+ let history_tree_cf = db.history_tree_cf().with_batch_for_writing(self);
if let Some(tree) = tree.as_ref().as_ref() {
- self.zs_insert(&history_tree_cf, (), tree);
+ // The batch is modified by this method and written by the caller.
+ let _ = history_tree_cf.zs_insert(&(), tree);
}
}
@@ -106,11 +175,20 @@ impl DiskWriteBatch {
///
/// From state format 25.3.0 onwards, the history trees are indexed by an empty key,
/// so this method does nothing.
- pub fn delete_range_history_tree(&mut self, zebra_db: &ZebraDb, from: &Height, to: &Height) {
- let history_tree_cf = zebra_db.db.cf_handle("history_tree").unwrap();
+ ///
+ /// The batch must be written to the database by the caller.
+ pub fn delete_range_history_tree(
+ &mut self,
+ db: &ZebraDb,
+ from: &Height,
+ until_strictly_before: &Height,
+ ) {
+ let history_tree_cf = db.legacy_history_tree_cf().with_batch_for_writing(self);
+
+ // The batch is modified by this method and written by the caller.
+ //
// TODO: convert zs_delete_range() to take std::ops::RangeBounds
- self.zs_delete_range(&history_tree_cf, from, to);
+ let _ = history_tree_cf.zs_delete_range(from, until_strictly_before);
}
// Value pool methods
@@ -127,17 +205,19 @@ impl DiskWriteBatch {
#[allow(clippy::unwrap_in_result)]
pub fn prepare_chain_value_pools_batch(
&mut self,
- db: &DiskDb,
- finalized: &SemanticallyVerifiedBlock,
+ db: &ZebraDb,
+ finalized: &FinalizedBlock,
utxos_spent_by_block: HashMap<transparent::OutPoint, transparent::Utxo>,
value_pool: ValueBalance<NonNegative>,
) -> Result<(), BoxError> {
- let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap();
+ let chain_value_pools_cf = db.chain_value_pools_cf().with_batch_for_writing(self);
- let SemanticallyVerifiedBlock { block, .. } = finalized;
+ let FinalizedBlock { block, .. } = finalized;
let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?;
- self.zs_insert(&tip_chain_value_pool, (), new_pool);
+
+ // The batch is modified by this method and written by the caller.
+ let _ = chain_value_pools_cf.zs_insert(&(), &new_pool); Ok(()) } diff --git a/zebra-state/src/service/finalized_state/zebra_db/metrics.rs b/zebra-state/src/service/finalized_state/zebra_db/metrics.rs index 75b342e2cdc..9a5b5aef50f 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/metrics.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/metrics.rs @@ -38,36 +38,24 @@ pub(crate) fn block_precommit_metrics(block: &Block, hash: block::Hash, height: if height.is_min() { "genesis " } else { "" } ); - metrics::counter!("state.finalized.block.count", 1); - metrics::gauge!("state.finalized.block.height", height.0 as f64); + metrics::counter!("state.finalized.block.count").increment(1); + metrics::gauge!("state.finalized.block.height").set(height.0 as f64); - metrics::counter!( - "state.finalized.cumulative.transactions", - transaction_count as u64 - ); + metrics::counter!("state.finalized.cumulative.transactions") + .increment(transaction_count as u64); - metrics::counter!( - "state.finalized.cumulative.sprout_nullifiers", - sprout_nullifier_count as u64 - ); - metrics::counter!( - "state.finalized.cumulative.sapling_nullifiers", - sapling_nullifier_count as u64 - ); - metrics::counter!( - "state.finalized.cumulative.orchard_nullifiers", - orchard_nullifier_count as u64 - ); + metrics::counter!("state.finalized.cumulative.sprout_nullifiers") + .increment(sprout_nullifier_count as u64); + metrics::counter!("state.finalized.cumulative.sapling_nullifiers") + .increment(sapling_nullifier_count as u64); + metrics::counter!("state.finalized.cumulative.orchard_nullifiers") + .increment(orchard_nullifier_count as u64); // The outputs from the genesis block can't be spent, so we skip them here. if !height.is_min() { - metrics::counter!( - "state.finalized.cumulative.transparent_prevouts", - transparent_prevout_count as u64 - ); - metrics::counter!( - "state.finalized.cumulative.transparent_newouts", - transparent_newout_count as u64 - ); + metrics::counter!("state.finalized.cumulative.transparent_prevouts") + .increment(transparent_prevout_count as u64); + metrics::counter!("state.finalized.cumulative.transparent_newouts") + .increment(transparent_newout_count as u64); } } diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 260d202eac9..4bba75b1891 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -9,8 +9,8 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must -//! be incremented each time the database format (column, serialization, etc) changes. +//! [`crate::constants::state_database_format_version_in_code()`] must be incremented +//! each time the database format (column, serialization, etc) changes. 
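The typed column family handles added in `chain.rs` above split access into typed reads and batched writes. A minimal sketch of the calling pattern, using only names and call shapes that appear in the hunks above (the exact `TypedColumnFamily` method signatures are inferred from those call sites, so treat them as assumptions):

```rust
// A sketch of the new typed access pattern, inside the zebra-state crate
// (the cf accessors are pub(crate)). Reads go through the typed handle;
// writes bind the handle to a DiskWriteBatch that the caller still writes.
fn update_tip_history_tree_example(
    db: &ZebraDb,
    batch: &mut DiskWriteBatch,
    tree: &NonEmptyHistoryTree,
) {
    // Typed read: the constant `()` key always points at the tip tree,
    // so a mistyped key or value is a compile error, not a format bug.
    let _current_tip = db.history_tree_cf().zs_get(&());

    // Typed write: zs_insert() only modifies `batch`; nothing reaches
    // disk until the caller passes the batch to `db.write_batch()`.
    let _ = db
        .history_tree_cf()
        .with_batch_for_writing(batch)
        .zs_insert(&(), tree);
}
```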
use std::{ collections::{BTreeMap, HashMap}, @@ -27,13 +27,13 @@ use zebra_chain::{ }; use crate::{ - request::SemanticallyVerifiedBlockWithTrees, + request::{FinalizedBlock, Treestate}, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::RawBytes, zebra_db::ZebraDb, }, - BoxError, SemanticallyVerifiedBlock, + BoxError, }; // Doc-only items @@ -109,11 +109,13 @@ impl ZebraDb { self.db.zs_get(&sprout_tree_cf, &()); if sprout_tree.is_none() { - let tip_height = self - .finalized_tip_height() - .expect("just checked for an empty database"); - - sprout_tree = self.db.zs_get(&sprout_tree_cf, &tip_height); + // In Zebra 1.4.0 and later, we don't update the sprout tip tree unless it is changed. + // And we write with a `()` key, not a height key. + // So we need to look for the most recent update height if the `()` key has never been written. + sprout_tree = self + .db + .zs_last_key_value(&sprout_tree_cf) + .map(|(_key, tree_value): (Height, _)| tree_value); } sprout_tree.expect("Sprout note commitment tree must exist if there is a finalized tip") @@ -153,7 +155,7 @@ impl ZebraDb { &self, ) -> impl Iterator)> + '_ { let sprout_trees = self.db.cf_handle("sprout_note_commitment_tree").unwrap(); - self.db.zs_range_iter(&sprout_trees, .., false) + self.db.zs_forward_range_iter(&sprout_trees, ..) } // # Sapling trees @@ -207,7 +209,7 @@ impl ZebraDb { R: std::ops::RangeBounds, { let sapling_trees = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - self.db.zs_range_iter(&sapling_trees, range, false) + self.db.zs_forward_range_iter(&sapling_trees, range) } /// Returns the Sapling note commitment trees in the reversed range, in decreasing height order. @@ -257,7 +259,7 @@ impl ZebraDb { .unwrap(); self.db - .zs_range_iter(&sapling_subtrees, range, false) + .zs_forward_range_iter(&sapling_subtrees, range) .collect() } @@ -275,7 +277,7 @@ impl ZebraDb { ) = self.db.zs_last_key_value(&sapling_subtrees)?; let tip_height = self.finalized_tip_height()?; - if subtree_data.end != tip_height { + if subtree_data.end_height != tip_height { return None; } @@ -333,7 +335,7 @@ impl ZebraDb { R: std::ops::RangeBounds, { let orchard_trees = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - self.db.zs_range_iter(&orchard_trees, range, false) + self.db.zs_forward_range_iter(&orchard_trees, range) } /// Returns the Orchard note commitment trees in the reversed range, in decreasing height order. @@ -383,7 +385,7 @@ impl ZebraDb { .unwrap(); self.db - .zs_range_iter(&orchard_subtrees, range, false) + .zs_forward_range_iter(&orchard_subtrees, range) .collect() } @@ -401,7 +403,7 @@ impl ZebraDb { ) = self.db.zs_last_key_value(&orchard_subtrees)?; let tip_height = self.finalized_tip_height()?; - if subtree_data.end != tip_height { + if subtree_data.end_height != tip_height { return None; } @@ -436,9 +438,9 @@ impl DiskWriteBatch { pub fn prepare_shielded_transaction_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, + finalized: &FinalizedBlock, ) -> Result<(), BoxError> { - let SemanticallyVerifiedBlock { block, .. } = finalized; + let FinalizedBlock { block, .. 
} = finalized; // Index each transaction's shielded data for transaction in &block.transactions { @@ -491,11 +493,18 @@ impl DiskWriteBatch { pub fn prepare_trees_batch( &mut self, zebra_db: &ZebraDb, - finalized: &SemanticallyVerifiedBlockWithTrees, + finalized: &FinalizedBlock, prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { - let height = finalized.verified.height; - let trees = finalized.treestate.note_commitment_trees.clone(); + let FinalizedBlock { + height, + treestate: + Treestate { + note_commitment_trees, + history_tree, + }, + .. + } = finalized; let prev_sprout_tree = prev_note_commitment_trees.as_ref().map_or_else( || zebra_db.sprout_tree_for_tip(), @@ -511,29 +520,29 @@ impl DiskWriteBatch { ); // Update the Sprout tree and store its anchor only if it has changed - if height.is_min() || prev_sprout_tree != trees.sprout { - self.update_sprout_tree(zebra_db, &trees.sprout) + if height.is_min() || prev_sprout_tree != note_commitment_trees.sprout { + self.update_sprout_tree(zebra_db, ¬e_commitment_trees.sprout) } // Store the Sapling tree, anchor, and any new subtrees only if they have changed - if height.is_min() || prev_sapling_tree != trees.sapling { - self.create_sapling_tree(zebra_db, &height, &trees.sapling); + if height.is_min() || prev_sapling_tree != note_commitment_trees.sapling { + self.create_sapling_tree(zebra_db, height, ¬e_commitment_trees.sapling); - if let Some(subtree) = trees.sapling_subtree { + if let Some(subtree) = note_commitment_trees.sapling_subtree { self.insert_sapling_subtree(zebra_db, &subtree); } } // Store the Orchard tree, anchor, and any new subtrees only if they have changed - if height.is_min() || prev_orchard_tree != trees.orchard { - self.create_orchard_tree(zebra_db, &height, &trees.orchard); + if height.is_min() || prev_orchard_tree != note_commitment_trees.orchard { + self.create_orchard_tree(zebra_db, height, ¬e_commitment_trees.orchard); - if let Some(subtree) = trees.orchard_subtree { + if let Some(subtree) = note_commitment_trees.orchard_subtree { self.insert_orchard_subtree(zebra_db, &subtree); } } - self.update_history_tree(zebra_db, &finalized.treestate.history_tree); + self.update_history_tree(zebra_db, history_tree); Ok(()) } diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index a9caed75c46..7c45911824e 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -8,8 +8,8 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must -//! be incremented each time the database format (column, serialization, etc) changes. +//! [`crate::constants::state_database_format_version_in_code()`] must be incremented +//! each time the database format (column, serialization, etc) changes. 
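The `FinalizedBlock` type that replaces `SemanticallyVerifiedBlockWithTrees` in these hunks flattens the old `finalized.verified.*` fields and carries the treestate next to them, which is why the write paths above can destructure everything in one pattern. A minimal sketch of that destructuring, using only field names shown in `prepare_trees_batch` above:

```rust
// Field names are taken from the hunks above; the body is illustrative
// only, not the real batch-preparation logic.
fn finalized_block_fields_example(finalized: &FinalizedBlock) {
    let FinalizedBlock {
        block,
        height,
        treestate:
            Treestate {
                note_commitment_trees,
                history_tree,
            },
        ..
    } = finalized;

    // `block` and `height` are used directly (no more `finalized.verified.height`),
    // and the trees no longer need to be cloned out of a wrapper struct.
    let _ = (block, height, note_commitment_trees, history_tree);
}
```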
use std::{ collections::{BTreeMap, BTreeSet, HashMap, HashSet}, @@ -25,6 +25,7 @@ use zebra_chain::{ }; use crate::{ + request::FinalizedBlock, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ @@ -36,7 +37,7 @@ use crate::{ }, zebra_db::ZebraDb, }, - BoxError, SemanticallyVerifiedBlock, + BoxError, }; impl ZebraDb { @@ -241,11 +242,7 @@ impl ZebraDb { AddressTransaction::address_iterator_range(address_location, query_height_range); self.db - .zs_range_iter( - &tx_loc_by_transparent_addr_loc, - transaction_location_range, - false, - ) + .zs_forward_range_iter(&tx_loc_by_transparent_addr_loc, transaction_location_range) .map(|(tx_loc, ())| tx_loc) .collect() } @@ -347,13 +344,13 @@ impl DiskWriteBatch { &mut self, db: &DiskDb, network: Network, - finalized: &SemanticallyVerifiedBlock, + finalized: &FinalizedBlock, new_outputs_by_out_loc: &BTreeMap, spent_utxos_by_outpoint: &HashMap, spent_utxos_by_out_loc: &BTreeMap, mut address_balances: HashMap, ) -> Result<(), BoxError> { - let SemanticallyVerifiedBlock { block, height, .. } = finalized; + let FinalizedBlock { block, height, .. } = finalized; // Update created and spent transparent outputs self.prepare_new_transparent_outputs_batch( diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index c01c16767d9..cc9cfbabdb0 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -182,7 +182,6 @@ impl NonFinalizedState { // Chain::cmp uses the partial cumulative work, and the hash of the tip block. // Neither of these fields has interior mutability. // (And when the tip block is dropped for a chain, the chain is also dropped.) - #[allow(clippy::mutable_key_type)] let chains = mem::take(&mut self.chain_set); let mut chains = chains.into_iter(); @@ -418,12 +417,12 @@ impl NonFinalizedState { chain_push_result.expect("scope has finished") } - /// Returns the length of the non-finalized portion of the current best chain. - pub fn best_chain_len(&self) -> u32 { - self.best_chain() - .expect("only called after inserting a block") - .blocks - .len() as u32 + /// Returns the length of the non-finalized portion of the current best chain + /// or `None` if the best chain has no blocks. + pub fn best_chain_len(&self) -> Option { + // This `as` can't overflow because the number of blocks in the chain is limited to i32::MAX, + // and the non-finalized chain is further limited by the fork length (slightly over 100 blocks). + Some(self.best_chain()?.blocks.len() as u32) } /// Returns `true` if `hash` is contained in the non-finalized portion of any @@ -582,7 +581,6 @@ impl NonFinalizedState { /// /// The chain can be an existing chain in the non-finalized state, or a freshly /// created fork. 
- #[allow(clippy::unwrap_in_result)] fn parent_chain(&self, parent_hash: block::Hash) -> Result, ValidateContextError> { match self.find_chain(|chain| chain.non_finalized_tip_hash() == parent_hash) { // Clone the existing Arc in the non-finalized state @@ -619,8 +617,8 @@ impl NonFinalizedState { return; } - metrics::counter!("state.memory.committed.block.count", 1); - metrics::gauge!("state.memory.committed.block.height", height.0 as f64); + metrics::counter!("state.memory.committed.block.count").increment(1); + metrics::gauge!("state.memory.committed.block.height").set(height.0 as f64); if self .best_chain() @@ -628,8 +626,8 @@ impl NonFinalizedState { .non_finalized_tip_hash() == hash { - metrics::counter!("state.memory.best.committed.block.count", 1); - metrics::gauge!("state.memory.best.committed.block.height", height.0 as f64); + metrics::counter!("state.memory.best.committed.block.count").increment(1); + metrics::gauge!("state.memory.best.committed.block.height").set(height.0 as f64); } self.update_metrics_for_chains(); @@ -641,11 +639,9 @@ impl NonFinalizedState { return; } - metrics::gauge!("state.memory.chain.count", self.chain_set.len() as f64); - metrics::gauge!( - "state.memory.best.chain.length", - self.best_chain_len() as f64, - ); + metrics::gauge!("state.memory.chain.count").set(self.chain_set.len() as f64); + metrics::gauge!("state.memory.best.chain.length",) + .set(self.best_chain_len().unwrap_or_default() as f64); } /// Update the progress bars after any chain is modified. diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index e4ccf9c308b..fc31605871b 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -694,7 +694,7 @@ impl Chain { self.sapling_subtrees .iter() - .find(|(_index, subtree)| subtree.end == height) + .find(|(_index, subtree)| subtree.end_height == height) .map(|(index, subtree)| subtree.with_index(*index)) } @@ -898,7 +898,7 @@ impl Chain { self.orchard_subtrees .iter() - .find(|(_index, subtree)| subtree.end == height) + .find(|(_index, subtree)| subtree.end_height == height) .map(|(index, subtree)| subtree.with_index(*index)) } diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index b7a3e5b00d4..d40b83137f2 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -40,40 +40,40 @@ fn push_genesis_chain() -> Result<()> { let _init_guard = zebra_test::init(); proptest!( - ProptestConfig::with_cases(env::var("PROPTEST_CASES") - .ok() - .and_then(|v| v.parse().ok()) - .unwrap_or(DEFAULT_PARTIAL_CHAIN_PROPTEST_CASES)), - |((chain, count, network, empty_tree) in PreparedChain::default())| { - prop_assert!(empty_tree.is_none()); - - let mut only_chain = Chain::new(network, Height(0), Default::default(), Default::default(), Default::default(), empty_tree, ValueBalance::zero()); - // contains the block value pool changes and chain value pool balances for each height - let mut chain_values = BTreeMap::new(); - - chain_values.insert(None, (None, only_chain.chain_value_pools.into())); - - for block in chain.iter().take(count).skip(1).cloned() { - let block = - ContextuallyVerifiedBlock::with_block_and_spent_utxos( - block, - only_chain.unspent_utxos(), - ) - .map_err(|e| (e, chain_values.clone())) - .expect("invalid block value pool change"); - - 
chain_values.insert(block.height.into(), (block.chain_value_pool_change.into(), None)); - - only_chain = only_chain - .push(block.clone()) - .map_err(|e| (e, chain_values.clone())) - .expect("invalid chain value pools"); - - chain_values.insert(block.height.into(), (block.chain_value_pool_change.into(), only_chain.chain_value_pools.into())); - } + ProptestConfig::with_cases(env::var("PROPTEST_CASES") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(DEFAULT_PARTIAL_CHAIN_PROPTEST_CASES)), + |((chain, count, network, empty_tree) in PreparedChain::default())| { + prop_assert!(empty_tree.is_none()); + + let mut only_chain = Chain::new(network, Height(0), Default::default(), Default::default(), Default::default(), empty_tree, ValueBalance::zero()); + // contains the block value pool changes and chain value pool balances for each height + let mut chain_values = BTreeMap::new(); + + chain_values.insert(None, (None, only_chain.chain_value_pools.into())); - prop_assert_eq!(only_chain.blocks.len(), count - 1); - }); + for block in chain.iter().take(count).skip(1).cloned() { + let block = + ContextuallyVerifiedBlock::with_block_and_spent_utxos( + block, + only_chain.unspent_utxos(), + ) + .map_err(|e| (e, chain_values.clone())) + .expect("invalid block value pool change"); + + chain_values.insert(block.height.into(), (block.chain_value_pool_change.into(), None)); + + only_chain = only_chain + .push(block.clone()) + .map_err(|e| (e, chain_values.clone())) + .expect("invalid chain value pools"); + + chain_values.insert(block.height.into(), (block.chain_value_pool_change.into(), only_chain.chain_value_pools.into())); + } + + prop_assert_eq!(only_chain.blocks.len(), count - 1); + }); Ok(()) } diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index 78c5de9d84a..eae0235ea01 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -327,7 +327,7 @@ fn shorter_chain_can_be_best_chain_for_network(network: Network) -> Result<()> { state.commit_block(short_chain_block.prepare(), &finalized_state)?; assert_eq!(2, state.chain_set.len()); - assert_eq!(2, state.best_chain_len()); + assert_eq!(Some(2), state.best_chain_len()); Ok(()) } @@ -381,7 +381,7 @@ fn longer_chain_with_more_work_wins_for_network(network: Network) -> Result<()> state.commit_block(short_chain_block.prepare(), &finalized_state)?; assert_eq!(2, state.chain_set.len()); - assert_eq!(5, state.best_chain_len()); + assert_eq!(Some(5), state.best_chain_len()); Ok(()) } diff --git a/zebra-state/src/service/queued_blocks.rs b/zebra-state/src/service/queued_blocks.rs index e383984987b..52424d2aa8a 100644 --- a/zebra-state/src/service/queued_blocks.rs +++ b/zebra-state/src/service/queued_blocks.rs @@ -186,19 +186,19 @@ impl QueuedBlocks { /// Update metrics after the queue is modified fn update_metrics(&self) { if let Some(min_height) = self.by_height.keys().next() { - metrics::gauge!("state.memory.queued.min.height", min_height.0 as f64); + metrics::gauge!("state.memory.queued.min.height").set(min_height.0 as f64); } else { // use f64::NAN as a sentinel value for "None", because 0 is a valid height - metrics::gauge!("state.memory.queued.min.height", f64::NAN); + metrics::gauge!("state.memory.queued.min.height").set(f64::NAN); } if let Some(max_height) = self.by_height.keys().next_back() { - metrics::gauge!("state.memory.queued.max.height", max_height.0 as f64); + 
metrics::gauge!("state.memory.queued.max.height").set(max_height.0 as f64); } else { // use f64::NAN as a sentinel value for "None", because 0 is a valid height - metrics::gauge!("state.memory.queued.max.height", f64::NAN); + metrics::gauge!("state.memory.queued.max.height").set(f64::NAN); } - metrics::gauge!("state.memory.queued.block.count", self.blocks.len() as f64); + metrics::gauge!("state.memory.queued.block.count").set(self.blocks.len() as f64); } /// Try to look up this UTXO in any queued block. @@ -357,8 +357,8 @@ impl SentHashes { /// Update sent block metrics after a block is sent. fn update_metrics_for_block(&self, height: block::Height) { - metrics::counter!("state.memory.sent.block.count", 1); - metrics::gauge!("state.memory.sent.block.height", height.0 as f64); + metrics::counter!("state.memory.sent.block.count").increment(1); + metrics::gauge!("state.memory.sent.block.height").set(height.0 as f64); self.update_metrics_for_cache(); } @@ -371,30 +371,25 @@ impl SentHashes { .flat_map(|batch| batch.front().map(|(_hash, height)| height)) .min() { - metrics::gauge!("state.memory.sent.cache.min.height", min_height.0 as f64); + metrics::gauge!("state.memory.sent.cache.min.height").set(min_height.0 as f64); } else { // use f64::NAN as a sentinel value for "None", because 0 is a valid height - metrics::gauge!("state.memory.sent.cache.min.height", f64::NAN); + metrics::gauge!("state.memory.sent.cache.min.height").set(f64::NAN); } if let Some(max_height) = batch_iter() .flat_map(|batch| batch.back().map(|(_hash, height)| height)) .max() { - metrics::gauge!("state.memory.sent.cache.max.height", max_height.0 as f64); + metrics::gauge!("state.memory.sent.cache.max.height").set(max_height.0 as f64); } else { // use f64::NAN as a sentinel value for "None", because 0 is a valid height - metrics::gauge!("state.memory.sent.cache.max.height", f64::NAN); + metrics::gauge!("state.memory.sent.cache.max.height").set(f64::NAN); } - metrics::gauge!( - "state.memory.sent.cache.block.count", - batch_iter().flatten().count() as f64, - ); + metrics::gauge!("state.memory.sent.cache.block.count") + .set(batch_iter().flatten().count() as f64); - metrics::gauge!( - "state.memory.sent.cache.batch.count", - batch_iter().count() as f64, - ); + metrics::gauge!("state.memory.sent.cache.batch.count").set(batch_iter().count() as f64); } } diff --git a/zebra-state/src/service/read.rs b/zebra-state/src/service/read.rs index cdee026d875..b8d0223923f 100644 --- a/zebra-state/src/service/read.rs +++ b/zebra-state/src/service/read.rs @@ -28,21 +28,21 @@ mod tests; pub use address::{ balance::transparent_balance, tx_id::transparent_tx_ids, - utxo::{address_utxos, AddressUtxos, ADDRESS_HEIGHTS_FULL_RANGE}, + utxo::{address_utxos, AddressUtxos}, }; pub use block::{ any_utxo, block, block_header, mined_transaction, transaction_hashes_for_block, unspent_utxo, - utxo, }; pub use find::{ - best_tip, block_locator, chain_contains_hash, depth, finalized_state_contains_block_hash, - find_chain_hashes, find_chain_headers, hash_by_height, height_by_hash, next_median_time_past, + best_tip, block_locator, depth, finalized_state_contains_block_hash, find_chain_hashes, + find_chain_headers, hash_by_height, height_by_hash, next_median_time_past, non_finalized_state_contains_block_hash, tip, tip_height, }; pub use tree::{orchard_subtrees, orchard_tree, sapling_subtrees, sapling_tree}; -#[cfg(feature = "getblocktemplate-rpcs")] -pub use difficulty::get_block_template_chain_info; +#[cfg(any(test, feature = "proptest-impl"))] 
+#[allow(unused_imports)] +pub use address::utxo::ADDRESS_HEIGHTS_FULL_RANGE; /// If a finalized state query is interrupted by a new finalized block, /// retry this many times. diff --git a/zebra-state/src/service/read/tests/vectors.rs b/zebra-state/src/service/read/tests/vectors.rs index 3ff14cb4203..f1f39b6ccc4 100644 --- a/zebra-state/src/service/read/tests/vectors.rs +++ b/zebra-state/src/service/read/tests/vectors.rs @@ -18,10 +18,11 @@ use zebra_test::{ }; use crate::{ + constants::{state_database_format_version_in_code, STATE_DATABASE_KIND}, init_test_services, populated_state, response::MinedTx, service::{ - finalized_state::{DiskWriteBatch, ZebraDb}, + finalized_state::{DiskWriteBatch, ZebraDb, STATE_COLUMN_FAMILIES_IN_CODE}, non_finalized_state::Chain, read::{orchard_subtrees, sapling_subtrees}, }, @@ -122,7 +123,8 @@ async fn test_read_subtrees() -> Result<()> { // Prepare the finalized state. let db = { - let db = ZebraDb::new(&Config::ephemeral(), Mainnet, true); + let db = new_ephemeral_db(); + let db_subtrees = db_height_range.enumerate().map(dummy_subtree); for db_subtree in db_subtrees { let mut db_batch = DiskWriteBatch::new(); @@ -179,7 +181,10 @@ async fn test_read_subtrees() -> Result<()> { .pop_first() .expect("chain_subtrees should not be empty"); assert_eq!(first_chain_index, index, "subtree indexes should match"); - assert_eq!(end_height, subtree.end, "subtree end heights should match"); + assert_eq!( + end_height, subtree.end_height, + "subtree end heights should match" + ); // Check that Zebra retrieves subtrees correctly when using a range with an Excluded start bound @@ -203,7 +208,8 @@ async fn test_sapling_subtrees() -> Result<()> { // Prepare the finalized state. let db_subtree = NoteCommitmentSubtree::new(0, Height(1), dummy_subtree_root); - let db = ZebraDb::new(&Config::ephemeral(), Mainnet, true); + + let db = new_ephemeral_db(); let mut db_batch = DiskWriteBatch::new(); db_batch.insert_sapling_subtree(&db, &db_subtree); db.write(db_batch) @@ -268,7 +274,8 @@ async fn test_orchard_subtrees() -> Result<()> { // Prepare the finalized state. let db_subtree = NoteCommitmentSubtree::new(0, Height(1), dummy_subtree_root); - let db = ZebraDb::new(&Config::ephemeral(), Mainnet, true); + + let db = new_ephemeral_db(); let mut db_batch = DiskWriteBatch::new(); db_batch.insert_orchard_subtree(&db, &db_subtree); db.write(db_batch) @@ -358,3 +365,18 @@ where { index == &subtree.index && subtree_data == &subtree.into_data() } + +/// Returns a new ephemeral database with no consistency checks. 
+fn new_ephemeral_db() -> ZebraDb { + ZebraDb::new( + &Config::ephemeral(), + STATE_DATABASE_KIND, + &state_database_format_version_in_code(), + Mainnet, + true, + STATE_COLUMN_FAMILIES_IN_CODE + .iter() + .map(ToString::to_string), + false, + ) +} diff --git a/zebra-state/src/service/tests.rs b/zebra-state/src/service/tests.rs index b7e55b9a9d9..0321e1bb965 100644 --- a/zebra-state/src/service/tests.rs +++ b/zebra-state/src/service/tests.rs @@ -143,20 +143,20 @@ async fn test_populated_state_responds_correctly( transcript.push(( Request::FindBlockHashes { known_blocks: known_hashes.iter().rev().cloned().collect(), - stop: next_hashes.get(0).cloned(), + stop: next_hashes.first().cloned(), }, Ok(Response::BlockHashes( - next_hashes.get(0).iter().cloned().cloned().collect(), + next_hashes.first().iter().cloned().cloned().collect(), )), )); transcript.push(( Request::FindBlockHeaders { known_blocks: known_hashes.iter().rev().cloned().collect(), - stop: next_hashes.get(0).cloned(), + stop: next_hashes.first().cloned(), }, Ok(Response::BlockHeaders( - next_headers.get(0).iter().cloned().cloned().collect(), + next_headers.first().iter().cloned().cloned().collect(), )), )); diff --git a/zebra-state/src/service/watch_receiver.rs b/zebra-state/src/service/watch_receiver.rs index 6540ccf98d4..6c4aba0b564 100644 --- a/zebra-state/src/service/watch_receiver.rs +++ b/zebra-state/src/service/watch_receiver.rs @@ -99,15 +99,30 @@ where self.receiver.borrow().clone() } - /// Calls [`watch::Receiver::changed`] and returns the result. + /// Calls [`watch::Receiver::changed()`] and returns the result. + /// Returns when the inner value has been updated, even if the old and new values are equal. /// /// Marks the watched data as seen. pub async fn changed(&mut self) -> Result<(), watch::error::RecvError> { self.receiver.changed().await } + /// Calls [`watch::Receiver::has_changed()`] and returns the result. + /// Returns `true` when the inner value has been updated, even if the old and new values are equal. + /// + /// Does not mark the watched data as seen. + pub fn has_changed(&self) -> Result<bool, watch::error::RecvError> { + self.receiver.has_changed() + } + /// Marks the watched data as seen. pub fn mark_as_seen(&mut self) { self.receiver.borrow_and_update(); } + + /// Marks the watched data as unseen. + /// Calls [`watch::Receiver::mark_changed()`]. + pub fn mark_changed(&mut self) { + self.receiver.mark_changed(); + } } diff --git a/zebra-state/src/service/write.rs b/zebra-state/src/service/write.rs index cb36b6a2ba5..acbc5c14ce1 100644 --- a/zebra-state/src/service/write.rs +++ b/zebra-state/src/service/write.rs @@ -290,7 +290,11 @@ pub fn write_blocks_from_channels( // Update the caller with the result. let _ = rsp_tx.send(result.clone().map(|()| child_hash).map_err(BoxError::from)); - while non_finalized_state.best_chain_len() > MAX_BLOCK_REORG_HEIGHT { + while non_finalized_state + .best_chain_len() + .expect("just successfully inserted a non-finalized block above") + > MAX_BLOCK_REORG_HEIGHT + { tracing::trace!("finalizing block past the reorg limit"); let contextually_verified_with_trees = non_finalized_state.finalize(); prev_finalized_note_commitment_trees = finalized_state @@ -303,21 +307,16 @@ pub fn write_blocks_from_channels( // Update the metrics if semantic and contextual validation passes // // TODO: split this out into a function?
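The metrics hunks on both sides of this point migrate from `metrics` 0.21, where the value was a macro argument, to the 0.22 API, where the macro returns a `Counter` or `Gauge` handle and the value is applied through a method. A minimal sketch of the pattern, reusing metric names from these hunks (recorder setup omitted; without an installed recorder these calls are no-ops):

```rust
fn update_block_metrics(tip_height: u32, queued_blocks: usize) {
    // metrics 0.21 passed the value inside the macro call:
    //   metrics::counter!("zcash.chain.verified.block.total", 1);
    //   metrics::gauge!("zcash.chain.verified.block.height", tip_height as f64);

    // metrics 0.22 returns a handle instead, so the value is a method argument:
    metrics::counter!("zcash.chain.verified.block.total").increment(1);
    metrics::gauge!("zcash.chain.verified.block.height").set(tip_height as f64);
    metrics::gauge!("state.memory.queued.block.count").set(queued_blocks as f64);
}
```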
- metrics::counter!("state.full_verifier.committed.block.count", 1); - metrics::counter!("zcash.chain.verified.block.total", 1); + metrics::counter!("state.full_verifier.committed.block.count").increment(1); + metrics::counter!("zcash.chain.verified.block.total").increment(1); - metrics::gauge!( - "state.full_verifier.committed.block.height", - tip_block_height.0 as f64, - ); + metrics::gauge!("state.full_verifier.committed.block.height") + .set(tip_block_height.0 as f64); // This height gauge is updated for both fully verified and checkpoint blocks. // These updates can't conflict, because this block write task makes sure that blocks // are committed in order. - metrics::gauge!( - "zcash.chain.verified.block.height", - tip_block_height.0 as f64, - ); + metrics::gauge!("zcash.chain.verified.block.height").set(tip_block_height.0 as f64); tracing::trace!("finished processing queued block"); } diff --git a/zebra-state/src/tests/setup.rs b/zebra-state/src/tests/setup.rs index 1432e72f368..c5a900c0507 100644 --- a/zebra-state/src/tests/setup.rs +++ b/zebra-state/src/tests/setup.rs @@ -99,6 +99,7 @@ pub(crate) fn new_state_with_mainnet_genesis( true, #[cfg(feature = "elasticsearch")] None, + false, ); let non_finalized_state = NonFinalizedState::new(network); diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index a6fdb52fb78..2d721532682 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" authors = ["Zcash Foundation <zebra@zfnd.org>"] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" @@ -16,18 +16,18 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] [dependencies] hex = "0.4.3" -indexmap = "2.0.1" +indexmap = "2.2.3" lazy_static = "1.4.0" insta = "1.33.0" -itertools = "0.11.0" -proptest = "1.3.1" +itertools = "0.12.1" +proptest = "1.4.0" once_cell = "1.18.0" rand = "0.8.5" -regex = "1.10.2" +regex = "1.10.3" -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } -futures = "0.3.28" +futures = "0.3.30" color-eyre = "0.6.2" # This is a transitive dependency via color-eyre. @@ -35,13 +35,13 @@ color-eyre = "0.6.2" tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } humantime = "2.1.0" -owo-colors = "3.5.0" +owo-colors = "4.0.0" spandoc = "0.2.2" -thiserror = "1.0.48" +thiserror = "1.0.57" -tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } tracing-error = "0.2.0" tracing = "0.1.39" [dev-dependencies] -tempfile = "3.8.0" +tempfile = "3.10.0" diff --git a/zebra-test/src/lib.rs b/zebra-test/src/lib.rs index f0d0090bace..196b345d887 100644 --- a/zebra-test/src/lib.rs +++ b/zebra-test/src/lib.rs @@ -1,7 +1,7 @@ //! Miscellaneous test code for Zebra.
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_test")] +#![doc(html_root_url = "https://docs.rs/zebra_test")] // Each lazy_static variable uses additional recursion #![recursion_limit = "512"] diff --git a/zebra-test/src/net.rs b/zebra-test/src/net.rs index 69ff9df8b3d..161a5ce80aa 100644 --- a/zebra-test/src/net.rs +++ b/zebra-test/src/net.rs @@ -55,7 +55,7 @@ pub fn zebra_skip_ipv6_tests() -> bool { /// /// If you want a once-off random unallocated port, use /// `random_unallocated_port`. Don't use this function if you don't need -/// to - it has a small risk of port conflcits. +/// to - it has a small risk of port conflicts. /// /// Use this function when you need to use the same random port multiple /// times. For example: setting up both ends of a connection, or re-using diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index f4c7d47bff4..179b7357eb7 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.30" +version = "1.0.0-beta.35" authors = ["Zcash Foundation <zebra@zfnd.org>"] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -17,7 +17,7 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] # Zebra is only supported on the latest stable Rust version. See the README for details. # Any Zebra release can break compatibility with older Rust versions. -rust-version = "1.70" +rust-version = "1.73" [[bin]] name = "zebra-checkpoints" @@ -36,6 +36,11 @@ name = "block-template-to-proposal" path = "src/bin/block-template-to-proposal/main.rs" required-features = ["getblocktemplate-rpcs"] +[[bin]] +name = "scanning-results-reader" +path = "src/bin/scanning-results-reader/main.rs" +required-features = ["shielded-scan"] + [features] default = [] @@ -61,6 +66,14 @@ getblocktemplate-rpcs = [ "zebra-chain/getblocktemplate-rpcs", ] +shielded-scan = [ + "itertools", + "jsonrpc", + "zcash_primitives", + "zcash_client_backend", + "zebra-scan" +] + [dependencies] color-eyre = "0.6.2" # This is a transitive dependency via color-eyre.
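The new `shielded-scan` feature above turns on a group of optional dependencies, and the `required-features` entry on the `[[bin]]` target makes `cargo` skip building `scanning-results-reader` entirely unless that feature is enabled. A minimal sketch of how feature-gated code pairs with an optional dependency; the helper function is illustrative, not part of zebra-utils, but it uses the same `jsonrpc` calls as the new binary further below:

```rust
// Only compiled when the `shielded-scan` feature (and therefore the optional
// `jsonrpc` dependency) is enabled; otherwise this item does not exist at all.
#[cfg(feature = "shielded-scan")]
pub fn rpc_client() -> jsonrpc::Client {
    use jsonrpc::simple_http::SimpleHttpTransport;

    // Connects to Zebra's default Mainnet RPC port, like the reader binary.
    jsonrpc::Client::with_transport(
        SimpleHttpTransport::builder()
            .url("127.0.0.1:8232")
            .expect("hardcoded RPC URL should be valid")
            .build(),
    )
}
```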
@@ -69,24 +82,30 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.107" +serde_json = "1.0.113" tracing-error = "0.2.0" -tracing-subscriber = "0.3.17" -thiserror = "1.0.48" +tracing-subscriber = "0.3.18" +thiserror = "1.0.57" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.35" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35" } +zebra-scan = { path = "../zebra-scan", version = "0.1.0-alpha.4", optional = true } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.30", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.35", optional = true } # These crates are needed for the zebra-checkpoints binary -itertools = { version = "0.11.0", optional = true } +itertools = { version = "0.12.1", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.10.2", optional = true } +regex = { version = "1.10.3", optional = true } # Avoid default openssl dependency to reduce the dependency tree and security alerts. -reqwest = { version = "0.11.22", default-features = false, features = ["rustls-tls"], optional = true } +reqwest = { version = "0.11.24", default-features = false, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.33.0", features = ["full"], optional = true } +tokio = { version = "1.36.0", features = ["full"], optional = true } + +jsonrpc = { version = "0.17.0", optional = true } + +zcash_primitives = { version = "0.13.0-rc.1", optional = true } +zcash_client_backend = { version = "0.10.0-rc.1", optional = true } diff --git a/zebra-utils/README.md b/zebra-utils/README.md index e8d625842c7..0e215cc03d7 100644 --- a/zebra-utils/README.md +++ b/zebra-utils/README.md @@ -1,17 +1,16 @@ # Zebra Utilities -This crate contains tools for zebra maintainers. - -## Programs +Tools for maintaining and testing Zebra: - [zebra-checkpoints](#zebra-checkpoints) - [zebrad-hash-lookup](#zebrad-hash-lookup) - [zebrad-log-filter](#zebrad-log-filter) - [zcash-rpc-diff](#zcash-rpc-diff) +- [scanning-results-reader](#scanning-results-reader) Binaries are easier to use if they are located in your system execution path. -### zebra-checkpoints +## zebra-checkpoints This command generates a list of zebra checkpoints, and writes them to standard output. Each checkpoint consists of a block height and hash. @@ -21,7 +20,7 @@ Zebra's GitHub workflows automatically generate checkpoints after every `main` b These checkpoints can be copied into the `main-checkpoints.txt` and `test-checkpoints.txt` files. To find the latest checkpoints on the `main` branch: -1. Find the [latest completed `CI Docker` workflow run on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml?query=branch%3Amain). +1. Find the [latest completed `CI Docker` workflow run on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=branch%3Amain). Due to GitHub UI issues, some runs will show as waiting, cancelled, or failed, but the checkpoints have still been generated. 2.
Go to the `Result of checkpoints-mainnet` step in the @@ -39,10 +38,10 @@ To create checkpoints, you need a synchronized instance of `zebrad` or `zcashd`. #### Checkpoint Generation Setup -Make sure your `zebrad` or `zcashd` is [listening for RPC requests](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html#structfield.listen_addr), +Make sure your `zebrad` or `zcashd` is [listening for RPC requests](https://doc-internal.zebra.zfnd.org/zebra_rpc/config/struct.Config.html#structfield.listen_addr), and synced to the network tip. -If you are on a Debian system, `zcash-cli` [can be installed as a package](https://zcash.readthedocs.io/en/latest/rtd_pages/install_debian_bin_packages.html). +If you are on a Debian system, `zcash-cli` [can be installed as a package](https://zcash.readthedocs.io/en/master/rtd_pages/install_debian_bin_packages.html). `zebra-checkpoints` is a standalone Rust binary; you can compile it using: @@ -81,10 +80,10 @@ For more details about checkpoint lists, see the [`zebra-checkpoints` README.](h To update the testnet checkpoints, `zebra-checkpoints` needs to connect to a testnet node. To launch a testnet node, you can either: -- start `zebrad` [with a `zebrad.toml` with `network.network` set to `Testnet`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.network), or +- start `zebrad` [with a `zebrad.toml` with `network.network` set to `Testnet`](https://docs.rs/zebra-network/latest/zebra_network/struct.Config.html#structfield.network), or - run `zcashd -testnet`. -Then use the commands above to renegerate the checkpoints. +Then use the commands above to regenerate the checkpoints. #### Submit new checkpoints as pull request @@ -93,7 +92,7 @@ Then use the commands above to renegerate the checkpoints. - Open a pull request with the updated Mainnet and Testnet lists at: https://github.com/ZcashFoundation/zebra/pulls -### zebrad-hash-lookup +## zebrad-hash-lookup Given a block hash, the script will get additional information using `zcash-cli`. @@ -108,7 +107,7 @@ $ ``` This program is commonly used as part of `zebrad-log-filter` where hashes will be captured from `zebrad` output. -### zebrad-log-filter +## zebrad-log-filter The program is designed to filter the output from the zebra terminal or log file. Each time a hash is seen, the script will capture it and get the additional information using `zebrad-hash-lookup`. @@ -127,7 +126,7 @@ next: 00000001436277884eef900772f0fcec9566becccebaab4713fd665b60fab309 ... ``` -### zcash-rpc-diff +## zcash-rpc-diff This program compares `zebrad` and `zcashd` RPC responses. @@ -188,3 +187,48 @@ You can override the binaries the script calls using these environmental variabl - `$ZCASH_CLI` - `$DIFF` - `$JQ` + +## Scanning Results Reader + +A utility for displaying Zebra's scanning results. + +### How It Works + +1. Opens Zebra's scanning storage and reads the results containing scanning keys + and TXIDs. +2. Fetches the transactions by their TXIDs from Zebra using the + `getrawtransaction` RPC. +3. Decrypts the tx outputs using the corresponding scanning key. +4. Prints the memos in the outputs. + +### How to Try It + +#### Scan the Block Chain with Zebra + +1. Follow the [Build & Install](https://zebra.zfnd.org/user/shielded-scan.html#build--install) + and [Configuration](https://zebra.zfnd.org/user/shielded-scan.html#configuration) + instructions in the Zebra Book. + +2.
Make sure Zebra runs on Mainnet and listens on the default RPC port by having + the following in the config file: + + ``` toml + [network] + network = 'Mainnet' + + [rpc] + listen_addr = "127.0.0.1:8232" + ``` + +3. Run Zebra with your config file. You can follow the + [Scanning the Block Chain](https://zebra.zfnd.org/user/shielded-scan.html#scanning-the-block-chain) + section in the book for more details. + +#### Run the Reader + +4. To print the memos in outputs decryptable by the provided scanning keys, run + the reader while also running Zebra. For example: + + ``` bash + cargo run --release --features shielded-scan --bin scanning-results-reader + ``` diff --git a/zebra-utils/src/bin/scanning-results-reader/main.rs b/zebra-utils/src/bin/scanning-results-reader/main.rs new file mode 100644 index 00000000000..b12892b7b9f --- /dev/null +++ b/zebra-utils/src/bin/scanning-results-reader/main.rs @@ -0,0 +1,113 @@ +//! Displays Zebra's scanning results: +//! +//! 1. Opens Zebra's scanning storage and reads the results containing scanning keys and TXIDs. +//! 2. Fetches the transactions by their TXIDs from Zebra using the `getrawtransaction` RPC. +//! 3. Decrypts the tx outputs using the corresponding scanning key. +//! 4. Prints the memos in the outputs. + +use std::collections::HashMap; + +use hex::ToHex; +use itertools::Itertools; +use jsonrpc::simple_http::SimpleHttpTransport; +use jsonrpc::Client; + +use zcash_client_backend::decrypt_transaction; +use zcash_client_backend::keys::UnifiedFullViewingKey; +use zcash_primitives::consensus::{BlockHeight, BranchId}; +use zcash_primitives::transaction::Transaction; +use zcash_primitives::zip32::AccountId; + +use zebra_scan::scan::sapling_key_to_scan_block_keys; +use zebra_scan::{storage::Storage, Config}; + +/// Prints the memos of transactions from Zebra's scanning results storage. +/// +/// Reads the results storage, iterates through all decrypted memos, and prints them to standard +/// output. Filters out some frequent and uninteresting memos typically associated with ZECPages. +/// +/// Notes: +/// +/// - `#[allow(clippy::print_stdout)]` is set to allow usage of `println!` for displaying the memos. +/// - This function expects Zebra's RPC server to be available. +/// +/// # Panics +/// +/// When: +/// +/// - The Sapling key from the storage is not valid. +/// - There is no diversifiable full viewing key (dfvk) available. +/// - The RPC response cannot be decoded from a hex string to bytes. +/// - The transaction fetched via RPC cannot be deserialized from raw bytes. +#[allow(clippy::print_stdout)] +pub fn main() { + let network = zcash_primitives::consensus::Network::MainNetwork; + let storage = Storage::new(&Config::default(), network.into(), true); + // If the first memo is empty, it doesn't get printed. But we never print empty memos anyway.
+ let mut prev_memo = "".to_owned(); + + for (key, _) in storage.sapling_keys_last_heights().iter() { + let dfvk = sapling_key_to_scan_block_keys(key, network.into()) + .expect("Scanning key from the storage should be valid") + .0 + .into_iter() + .exactly_one() + .expect("There should be exactly one dfvk"); + + let ufvk_with_acc_id = HashMap::from([( + AccountId::from(1), + UnifiedFullViewingKey::new(Some(dfvk), None).expect("`dfvk` should be `Some`"), + )]); + + for (height, txids) in storage.sapling_results(key) { + let height = BlockHeight::from(height); + + for txid in txids.iter() { + let tx = Transaction::read( + &hex::decode(&fetch_tx_via_rpc(txid.encode_hex())) + .expect("RPC response should be decodable from hex string to bytes")[..], + BranchId::for_height(&network, height), + ) + .expect("TX fetched via RPC should be deserializable from raw bytes"); + + for output in decrypt_transaction(&network, height, &tx, &ufvk_with_acc_id) { + let memo = memo_bytes_to_string(output.memo.as_array()); + + if !memo.is_empty() + // Filter out some uninteresting and repeating memos from ZECPages. + && !memo.contains("LIKE:") + && !memo.contains("VOTE:") + && memo != prev_memo + { + println!("{memo}\n"); + prev_memo = memo; + } + } + } + } + } +} + +/// Trims trailing zeroes from a memo, and returns the memo as a [`String`]. +fn memo_bytes_to_string(memo: &[u8; 512]) -> String { + match memo.iter().rposition(|&byte| byte != 0) { + Some(i) => String::from_utf8_lossy(&memo[..=i]).into_owned(), + None => "".to_owned(), + } +} + +/// Uses the `getrawtransaction` RPC to retrieve a transaction by its TXID. +fn fetch_tx_via_rpc(txid: String) -> String { + let client = Client::with_transport( + SimpleHttpTransport::builder() + .url("127.0.0.1:8232") + .expect("Zebra's URL should be valid") + .build(), + ); + + client + .send_request(client.build_request("getrawtransaction", Some(&jsonrpc::arg([txid])))) + .expect("Sending the `getrawtransaction` request should succeed") + .result() + .expect("Zebra's RPC response should contain a valid result") +} diff --git a/zebra-utils/src/bin/zebra-checkpoints/main.rs b/zebra-utils/src/bin/zebra-checkpoints/main.rs index a8d2d4e3e60..cff158b5ca2 100644 --- a/zebra-utils/src/bin/zebra-checkpoints/main.rs +++ b/zebra-utils/src/bin/zebra-checkpoints/main.rs @@ -164,8 +164,7 @@ async fn main() -> Result<()> { // Checkpoints must be on the main chain, so we skip blocks that are within the // Zcash reorg limit. - let height_limit = height_limit - - HeightDiff::try_from(MIN_TRANSPARENT_COINBASE_MATURITY).expect("constant fits in i32"); + let height_limit = height_limit - HeightDiff::from(MIN_TRANSPARENT_COINBASE_MATURITY); let height_limit = height_limit .ok_or_else(|| { eyre!( diff --git a/zebra-utils/src/lib.rs b/zebra-utils/src/lib.rs index 207e094939a..ea987abf6bd 100644 --- a/zebra-utils/src/lib.rs +++ b/zebra-utils/src/lib.rs @@ -1,7 +1,7 @@ //! Utilities for Zebra development, not for library or application users. 
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_utils")] +#![doc(html_root_url = "https://docs.rs/zebra_utils")] use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index d4ff120e793..b6597d279e6 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "1.3.0" +version = "1.6.0" authors = ["Zcash Foundation <zebra@zfnd.org>"] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -19,7 +19,7 @@ edition = "2021" # Zebra is only supported on the latest stable Rust version. See the README for details. # Any Zebra release can break compatibility with older Rust versions. -rust-version = "1.70" +rust-version = "1.73" # Settings that impact runtime behaviour @@ -68,6 +68,18 @@ getblocktemplate-rpcs = [ "zebra-chain/getblocktemplate-rpcs", ] +# Experimental internal miner support +internal-miner = [ + "thread-priority", + "zebra-chain/internal-miner", + # TODO: move common code into zebra-chain or zebra-node-services and remove the RPC dependency + "zebra-rpc/internal-miner", + "zebra-rpc/getblocktemplate-rpcs", +] + +# Experimental shielded blockchain scanning +shielded-scan = ["zebra-scan"] + # Experimental elasticsearch indexing elasticsearch = [ "zebra-state/elasticsearch", @@ -115,6 +127,7 @@ proptest-impl = [ "zebra-state/proptest-impl", "zebra-network/proptest-impl", "zebra-chain/proptest-impl", + "zebra-scan?/proptest-impl", ] # Build the zebra-checkpoints utility for checkpoint generation tests @@ -145,68 +158,75 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.30" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.30" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.30" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.35" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.35" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.35" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.35" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35" } + +# Experimental shielded-scan feature +zebra-scan = { path = "../zebra-scan", version = "0.1.0-alpha.4", optional = true } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.30", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.35", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.4.6", features = ["cargo"] } -chrono = { version = "0.4.31", default-features = false, features = ["clock", "std"] } +clap = { version = "4.5.1", features = ["cargo"] } +chrono = { version = "0.4.34", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" -indexmap = "2.0.1"
+indexmap = "2.2.3" lazy_static = "1.4.0" -semver = "1.0.20" -serde = { version = "1.0.188", features = ["serde_derive"] } -toml = "0.8.3" - -futures = "0.3.28" -rayon = "1.7.0" -tokio = { version = "1.33.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +semver = "1.0.22" +serde = { version = "1.0.196", features = ["serde_derive"] } +toml = "0.8.10" + +futures = "0.3.30" +rayon = "1.8.1" +tokio = { version = "1.36.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio-stream = { version = "0.1.14", features = ["time"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } -pin-project = "1.1.3" +pin-project = "1.1.4" color-eyre = { version = "0.6.2", default-features = false, features = ["issue-url"] } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -thiserror = "1.0.48" +thiserror = "1.0.57" -tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -tracing-appender = "0.2.2" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-appender = "0.2.3" tracing-error = "0.2.0" tracing-futures = "0.2.5" tracing = "0.1.39" -metrics = "0.21.1" +metrics = "0.22.1" dirs = "5.0.1" atty = "0.2.14" -num-integer = "0.1.45" +num-integer = "0.1.46" rand = "0.8.5" +# prod feature internal-miner +thread-priority = { version = "0.15.1", optional = true } + # prod feature sentry -sentry = { version = "0.31.7", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { version = "0.32.2", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } -inferno = { version = "0.11.17", default-features = false, optional = true } +inferno = { version = "0.11.19", default-features = false, optional = true } # prod feature journald tracing-journald = { version = "0.3.0", optional = true } # prod feature filter-reload -hyper = { version = "0.14.27", features = ["http1", "http2", "server"], optional = true } +hyper = { version = "0.14.28", features = ["http1", "http2", "server"], optional = true } # prod feature prometheus -metrics-exporter-prometheus = { version = "0.12.0", default-features = false, features = ["http-listener"], optional = true } +metrics-exporter-prometheus = { version = "0.13.1", default-features = false, features = ["http-listener"], optional = true } # prod feature release_max_level_info # @@ -216,20 +236,20 @@ log = "0.4.20" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } -indicatif = { version = "0.17.6", optional = true } +indicatif = { version = "0.17.8", optional = true } # test feature proptest-impl -proptest = { version = "1.3.1", optional = true } +proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.4.0", optional = true } # test feature tokio-console console-subscriber = { version = "0.2.0", optional = true } [build-dependencies] -vergen = { version = "8.2.5", default-features = false, features = ["cargo", "git", "git2", "rustc"] } +vergen = { version = "8.3.1", default-features = false, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests -tonic-build = { version = "0.10.2", optional = true } +tonic-build = { version = "0.11.0", optional = true } 
[dev-dependencies] abscissa_core = { version = "0.7.0", features = ["testing"] } @@ -237,37 +257,39 @@ hex = "0.4.3" hex-literal = "0.4.1" jsonrpc-core = "18.0.0" once_cell = "1.18.0" -regex = "1.10.2" +regex = "1.10.3" insta = { version = "1.33.0", features = ["json"] } # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.107", features = ["preserve_order"] } -tempfile = "3.8.0" +serde_json = { version = "1.0.113", features = ["preserve_order"] } +tempfile = "3.10.0" -hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} +hyper = { version = "0.14.28", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.36.0", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.14" # test feature lightwalletd-grpc-tests -prost = "0.12.1" -tonic = "0.10.2" +prost = "0.12.2" +tonic = "0.11.0" -proptest = "1.3.1" +proptest = "1.4.0" proptest-derive = "0.4.0" # enable span traces and track caller in tests color-eyre = { version = "0.6.2" } -zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } -zebra-consensus = { path = "../zebra-consensus", features = ["proptest-impl"] } -zebra-network = { path = "../zebra-network", features = ["proptest-impl"] } -zebra-state = { path = "../zebra-state", features = ["proptest-impl"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.35", features = ["proptest-impl"] } +zebra-scan = { path = "../zebra-scan", version = "0.1.0-alpha.4", features = ["proptest-impl"] } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.35", features = ["proptest-impl"] } -zebra-node-services = { path = "../zebra-node-services", features = ["rpc-client"] } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.35", features = ["rpc-client"] } -zebra-test = { path = "../zebra-test" } +zebra-test = { path = "../zebra-test", version = "1.0.0-beta.35" } +zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.2" } # Used by the checkpoint generation tests via the zebra-checkpoints feature # (the binaries in this crate won't be built unless their features are enabled). @@ -278,4 +300,4 @@ zebra-test = { path = "../zebra-test" } # When `-Z bindeps` is stabilised, enable this binary dependency instead: # https://github.com/rust-lang/cargo/issues/9096 # zebra-utils { path = "../zebra-utils", artifact = "bin:zebra-checkpoints" } -zebra-utils = { path = "../zebra-utils" } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.35" } diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index d8df9d3f009..96264f2f051 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -1,4 +1,7 @@ //! Zebrad Abscissa Application +//! +//! This is the code that starts `zebrad`, and launches its tasks and services. +//! See [the crate docs](crate) and [the start docs](crate::commands::start) for more details. 
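The `vergen_build_version` hunks just below keep the same parsing flow while replacing `let ... else { return None }` blocks with the `?` operator. As a simplified sketch of the `git describe` to semver mapping described in the comments (not Zebra's exact implementation, which also validates tags and rewrites `dirty`):

```rust
/// Maps a `git describe` string like "v1.6.0-34-g1234abc" onto a semver
/// version with the extra parts as build metadata: "1.6.0+34.g1234abc".
fn describe_to_semver(describe: &str) -> Option<semver::Version> {
    let mut parts = describe.split('-');

    // The "version core", with the leading `v` stripped if present.
    let core = parts.next()?;
    let core = core.strip_prefix('v').unwrap_or(core);
    let mut version = semver::Version::parse(core).ok()?;

    // Any remaining parts (commit count, short hash) become build metadata.
    let build = parts.collect::<Vec<_>>().join(".");
    if !build.is_empty() {
        version.build = semver::BuildMetadata::new(&build).ok()?;
    }

    Some(version)
}
```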
use std::{env, fmt::Write as _, io::Write as _, process, sync::Arc}; @@ -13,7 +16,8 @@ use semver::{BuildMetadata, Version}; use zebra_network::constants::PORT_IN_USE_ERROR; use zebra_state::{ - constants::LOCK_FILE_ERROR, database_format_version_in_code, database_format_version_on_disk, + constants::LOCK_FILE_ERROR, state_database_format_version_in_code, + state_database_format_version_on_disk, }; use crate::{ @@ -74,9 +78,7 @@ fn vergen_build_version() -> Option<Version> { // - optional pre-release: `-`tag[`.`tag ...] // - optional build: `+`tag[`.`tag ...] // change the git describe format to the semver 2.0 format - let Some(vergen_git_describe) = VERGEN_GIT_DESCRIBE else { - return None; - }; + let vergen_git_describe = VERGEN_GIT_DESCRIBE?; // `git describe` uses "dirty" for uncommitted changes, // but users won't understand what that means. @@ -86,10 +88,7 @@ fn vergen_build_version() -> Option<Version> { let mut vergen_git_describe = vergen_git_describe.split('-').peekable(); // Check the "version core" part. - let version = vergen_git_describe.next(); - let Some(mut version) = version else { - return None; - }; + let mut version = vergen_git_describe.next()?; // strip the leading "v", if present. version = version.strip_prefix('v').unwrap_or(version); @@ -264,7 +263,7 @@ impl Application for ZebradApp { // reads state disk version file, doesn't open RocksDB database let disk_db_version = - match database_format_version_on_disk(&config.state, config.network.network) { + match state_database_format_version_on_disk(&config.state, config.network.network) { Ok(Some(version)) => version.to_string(), // This "version" is specially formatted to match a relaxed version regex in CI Ok(None) => "creating.new.database".to_string(), @@ -283,7 +282,7 @@ impl Application for ZebradApp { // code constant ( "running state version", - database_format_version_in_code().to_string(), + state_database_format_version_in_code().to_string(), ), // state disk file, doesn't open database ("initial disk state version", disk_db_version), diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs index d7f4fa337be..9751f8693e1 100644 --- a/zebrad/src/commands.rs +++ b/zebrad/src/commands.rs @@ -1,27 +1,26 @@ //!
Zebrad Subcommands +use std::path::PathBuf; + +use abscissa_core::{config::Override, Command, Configurable, FrameworkError, Runnable}; + +use crate::config::ZebradConfig; + +pub use self::{entry_point::EntryPoint, start::StartCmd}; + +use self::{copy_state::CopyStateCmd, generate::GenerateCmd, tip_height::TipHeightCmd}; + +pub mod start; + mod copy_state; -mod download; mod entry_point; mod generate; -mod start; mod tip_height; #[cfg(test)] mod tests; -use self::ZebradCmd::*; -use self::{ - copy_state::CopyStateCmd, download::DownloadCmd, generate::GenerateCmd, - tip_height::TipHeightCmd, -}; - -pub use self::{entry_point::EntryPoint, start::StartCmd}; - -use crate::config::ZebradConfig; - -use abscissa_core::{config::Override, Command, Configurable, FrameworkError, Runnable}; -use std::path::PathBuf; +use ZebradCmd::*; /// Zebrad Configuration Filename pub const CONFIG_FILE: &str = "zebrad.toml"; @@ -33,10 +32,6 @@ pub enum ZebradCmd { // TODO: hide this command from users in release builds (#3279) CopyState(CopyStateCmd), - // The `download` subcommand - /// Pre-download required Zcash Sprout and Sapling parameter files - Download(DownloadCmd), - /// Generate a default `zebrad.toml` configuration Generate(GenerateCmd), @@ -60,7 +55,7 @@ impl ZebradCmd { CopyState(_) | Start(_) => true, // Utility commands that don't use server components - Download(_) | Generate(_) | TipHeight(_) => false, + Generate(_) | TipHeight(_) => false, } } @@ -74,14 +69,14 @@ impl ZebradCmd { Start(_) => true, // Utility commands - CopyState(_) | Download(_) | Generate(_) | TipHeight(_) => false, + CopyState(_) | Generate(_) | TipHeight(_) => false, } } /// Returns true if this command should ignore errors when /// attempting to load a config file. pub(crate) fn should_ignore_load_config_error(&self) -> bool { - matches!(self, ZebradCmd::Generate(_) | ZebradCmd::Download(_)) + matches!(self, ZebradCmd::Generate(_)) } /// Returns the default log level for this command, based on the `verbose` command line flag. @@ -96,7 +91,7 @@ impl ZebradCmd { Generate(_) | TipHeight(_) => true, // Commands that generate informative logging output by default. - CopyState(_) | Download(_) | Start(_) => false, + CopyState(_) | Start(_) => false, }; if only_show_warnings && !verbose { @@ -113,7 +108,6 @@ impl Runnable for ZebradCmd { fn run(&self) { match self { CopyState(cmd) => cmd.run(), - Download(cmd) => cmd.run(), Generate(cmd) => cmd.run(), Start(cmd) => cmd.run(), TipHeight(cmd) => cmd.run(), diff --git a/zebrad/src/commands/download.rs b/zebrad/src/commands/download.rs deleted file mode 100644 index 4feefcb9e58..00000000000 --- a/zebrad/src/commands/download.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! `download` subcommand - pre-download required parameter files -//! -//! `zebrad download` automatically downloads required parameter files the first time it is run. -//! -//! This command should be used if you're launching lots of `zebrad start` instances for testing, -//! or you want to include the parameter files in a distribution package. - -use abscissa_core::{Command, Runnable}; - -/// Pre-download required Zcash Sprout and Sapling parameter files -#[derive(Command, Debug, Default, clap::Parser)] -pub struct DownloadCmd {} - -impl DownloadCmd { - /// Download the Sapling and Sprout Groth16 parameters if needed, - /// check they were downloaded correctly, and load them into Zebra. - /// - /// # Panics - /// - /// If the downloaded or pre-existing parameter files are invalid. 
- fn download_and_check(&self) { - // The lazy static initializer does the download, if needed, - // and the file hash checks. - lazy_static::initialize(&zebra_consensus::groth16::GROTH16_PARAMETERS); - } -} - -impl Runnable for DownloadCmd { - /// Run the download command. - fn run(&self) { - info!("checking if Zcash Sapling and Sprout parameters have been downloaded"); - - self.download_and_check(); - } -} diff --git a/zebrad/src/commands/generate.rs b/zebrad/src/commands/generate.rs index de9a3019c53..34a26da37d8 100644 --- a/zebrad/src/commands/generate.rs +++ b/zebrad/src/commands/generate.rs @@ -37,13 +37,22 @@ impl Runnable for GenerateCmd { # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # # 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; # 2. The file `zebrad.toml` in the user's preference directory (platform-dependent); # 3. The default config. +# +# The user's preference directory and the default path to the `zebrad` config are platform-dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Roaming\zebrad.toml` | " .to_owned(); diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 3f44525b22a..0e113e974bd 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -45,6 +45,16 @@ //! * Progress Task //! * logs progress towards the chain tip //! +//! Shielded Scanning: +//! * Shielded Scanner Task +//! * if the user has configured Zebra with their shielded viewing keys, scans new and existing +//! blocks for transactions that use those keys +//! +//! Block Mining: +//! * Internal Miner Task +//! * if the user has configured Zebra to mine blocks, spawns tasks to generate new blocks, +//! and submits them for verification. This automatically shares these new blocks with peers. +//! //! Mempool Transactions: //! * Mempool Service //!
* activates when the syncer is near the chain tip @@ -160,7 +170,6 @@ impl StartCmd { config.consensus.clone(), config.network.network, state.clone(), - config.consensus.debug_skip_parameter_preload, ) .await; @@ -196,7 +205,7 @@ impl StartCmd { block_download_peer_set: peer_set.clone(), block_verifier: block_verifier_router.clone(), mempool: mempool.clone(), - state, + state: state.clone(), latest_chain_tip: latest_chain_tip.clone(), }; setup_tx @@ -221,10 +230,10 @@ impl StartCmd { build_version(), user_agent(), mempool.clone(), - read_only_state_service, - block_verifier_router, + read_only_state_service.clone(), + block_verifier_router.clone(), sync_status.clone(), - address_book, + address_book.clone(), latest_chain_tip.clone(), config.network.network, ); @@ -250,8 +259,10 @@ impl StartCmd { ); info!("spawning delete old databases task"); - let mut old_databases_task_handle = - zebra_state::check_and_delete_old_databases(config.state.clone()); + let mut old_databases_task_handle = zebra_state::check_and_delete_old_state_databases( + &config.state, + config.network.network, + ); info!("spawning progress logging task"); let progress_task_handle = tokio::spawn( @@ -263,9 +274,11 @@ impl StartCmd { .in_current_span(), ); + // Spawn the never-ending end of support task. info!("spawning end of support checking task"); let end_of_support_task_handle = tokio::spawn( - sync::end_of_support::start(config.network.network, latest_chain_tip).in_current_span(), + sync::end_of_support::start(config.network.network, latest_chain_tip.clone()) + .in_current_span(), ); // Give the inbound service more time to clear its queue, @@ -279,13 +292,58 @@ impl StartCmd { &config.mempool, peer_set, mempool.clone(), - sync_status, - chain_tip_change, + sync_status.clone(), + chain_tip_change.clone(), ); info!("spawning syncer task"); let syncer_task_handle = tokio::spawn(syncer.sync().in_current_span()); + #[cfg(feature = "shielded-scan")] + // Spawn the never-ending scan task only if we have keys to scan for. + let scan_task_handle = { + // TODO: log the number of keys and update the scan_task_starts() test + info!("spawning shielded scanner with configured viewing keys"); + zebra_scan::spawn_init( + config.shielded_scan.clone(), + config.network.network, + state, + chain_tip_change, + ) + }; + + #[cfg(not(feature = "shielded-scan"))] + // Spawn a dummy scan task which doesn't do anything and never finishes. + let scan_task_handle: tokio::task::JoinHandle<Result<(), Report>> = + tokio::spawn(std::future::pending().in_current_span()); + + // And finally, spawn the internal Zcash miner, if it is enabled. + // + // TODO: add a config to enable the miner rather than a feature. + #[cfg(feature = "internal-miner")] + let miner_task_handle = if config.mining.is_internal_miner_enabled() { + info!("spawning Zcash miner"); + let rpc = zebra_rpc::methods::get_block_template_rpcs::GetBlockTemplateRpcImpl::new( + config.network.network, + config.mining.clone(), + mempool, + read_only_state_service, + latest_chain_tip, + block_verifier_router, + sync_status, + address_book, + ); + + crate::components::miner::spawn_init(&config.mining, rpc) + } else { + tokio::spawn(std::future::pending().in_current_span()) + }; + + #[cfg(not(feature = "internal-miner"))] + // Spawn a dummy miner task which doesn't do anything and never finishes.
+ let miner_task_handle: tokio::task::JoinHandle<Result<(), Report>> = + tokio::spawn(std::future::pending().in_current_span()); + info!("spawned initial Zebra tasks"); // TODO: put tasks into an ongoing FuturesUnordered and a startup FuturesUnordered? @@ -300,16 +358,14 @@ impl StartCmd { pin!(tx_gossip_task_handle); pin!(progress_task_handle); pin!(end_of_support_task_handle); + pin!(scan_task_handle); + pin!(miner_task_handle); // startup tasks let BackgroundTaskHandles { - mut groth16_download_handle, mut state_checkpoint_verify_handle, } = consensus_task_handles; - let groth16_download_handle_fused = (&mut groth16_download_handle).fuse(); - pin!(groth16_download_handle_fused); - let state_checkpoint_verify_handle_fused = (&mut state_checkpoint_verify_handle).fuse(); pin!(state_checkpoint_verify_handle_fused); @@ -371,19 +427,6 @@ impl StartCmd { .expect("unexpected panic in the end of support task") .map(|_| info!("end of support task exited")), - - // Unlike other tasks, we expect the download task to finish while Zebra is running. - groth16_download_result = &mut groth16_download_handle_fused => { - groth16_download_result - .unwrap_or_else(|_| panic!( - "unexpected panic in the Groth16 pre-download and check task. {}", - zebra_consensus::groth16::Groth16Parameters::failure_hint()) - ); - - exit_when_task_finishes = false; - Ok(()) - } - // We also expect the state checkpoint verify task to finish. state_checkpoint_verify_result = &mut state_checkpoint_verify_handle_fused => { state_checkpoint_verify_result @@ -403,6 +446,14 @@ impl StartCmd { exit_when_task_finishes = false; Ok(()) } + + scan_result = &mut scan_task_handle => scan_result + .expect("unexpected panic in the scan task") + .map(|_| info!("scan task exited")), + + miner_result = &mut miner_task_handle => miner_result + .expect("unexpected panic in the miner task") + .map(|_| info!("miner task exited")), }; // Stop Zebra if a task finished and returned an error, @@ -428,9 +479,10 @@ impl StartCmd { tx_gossip_task_handle.abort(); progress_task_handle.abort(); end_of_support_task_handle.abort(); + scan_task_handle.abort(); + miner_task_handle.abort(); // startup tasks - groth16_download_handle.abort(); state_checkpoint_verify_handle.abort(); old_databases_task_handle.abort(); diff --git a/zebrad/src/components.rs b/zebrad/src/components.rs index 5daf65a4a79..43b051f1209 100644 --- a/zebrad/src/components.rs +++ b/zebrad/src/components.rs @@ -16,5 +16,8 @@ pub mod tokio; #[allow(missing_docs)] pub mod tracing; +#[cfg(feature = "internal-miner")] +pub mod miner; + pub use inbound::Inbound; pub use sync::ChainSync; diff --git a/zebrad/src/components/inbound.rs b/zebrad/src/components/inbound.rs index e93aa8517f0..cfc7008e0bd 100644 --- a/zebrad/src/components/inbound.rs +++ b/zebrad/src/components/inbound.rs @@ -14,12 +14,10 @@ use std::{ time::Duration, }; -use chrono::Utc; use futures::{ future::{FutureExt, TryFutureExt}, stream::Stream, }; -use num_integer::div_ceil; use tokio::sync::oneshot::{self, error::TryRecvError}; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service, ServiceExt}; @@ -32,10 +30,7 @@ use zebra_chain::{ transaction::UnminedTxId, }; use zebra_consensus::router::RouterError; -use zebra_network::{ - constants::{ADDR_RESPONSE_LIMIT_DENOMINATOR, MAX_ADDRS_IN_MESSAGE}, - AddressBook, InventoryResponse, -}; +use zebra_network::{AddressBook, InventoryResponse}; use zebra_node_services::mempool; use crate::BoxError; @@ -45,8 +40,11 @@ use super::sync::{BLOCK_DOWNLOAD_TIMEOUT, BLOCK_VERIFY_TIMEOUT}; use
InventoryResponse::*; +mod cached_peer_addr_response; pub(crate) mod downloads; +use cached_peer_addr_response::CachedPeerAddrResponse; + #[cfg(test)] mod tests; @@ -135,8 +133,12 @@ pub enum Setup { Initialized { // Services // - /// A shared list of peer addresses. - address_book: Arc<std::sync::Mutex<AddressBook>>, + /// An owned partial list of peer addresses used as a `GetAddr` response, and + /// a shared list of peer addresses used to periodically refresh the partial list. + /// + /// Refreshed from the address book in the `poll_ready` method + /// after [`CACHED_ADDRS_REFRESH_INTERVAL`](cached_peer_addr_response::CACHED_ADDRS_REFRESH_INTERVAL). + cached_peer_addr_response: CachedPeerAddrResponse, /// A `futures::Stream` that downloads and verifies gossiped blocks. block_downloads: Pin<Box<BlockDownloads>>, @@ -261,6 +263,8 @@ impl Service<zn::Request> for Inbound { latest_chain_tip, } = setup_data; + let cached_peer_addr_response = CachedPeerAddrResponse::new(address_book); + let block_downloads = Box::pin(BlockDownloads::new( full_verify_concurrency_limit, Timeout::new(block_download_peer_set, BLOCK_DOWNLOAD_TIMEOUT), @@ -271,14 +275,18 @@ result = Ok(()); Setup::Initialized { - address_book, + cached_peer_addr_response, block_downloads, mempool, state, } } Err(TryRecvError::Empty) => { - // There's no setup data yet, so keep waiting for it + // There's no setup data yet, so keep waiting for it. + // + // We could use Future::poll() to get a waker and return Poll::Pending here. + // But we want to drop excess requests during startup instead. Otherwise, + // the inbound service gets overloaded, and starts disconnecting peers. result = Ok(()); Setup::Pending { full_verify_concurrency_limit, @@ -302,17 +310,22 @@ } // Clean up completed download tasks, ignoring their results Setup::Initialized { - address_book, + cached_peer_addr_response, mut block_downloads, mempool, state, } => { + // # Correctness + // + // Clear the stream but ignore the final Pending return value. + // If we returned Pending here, and there were no waiting block downloads, + // then inbound requests would wait for the next block download, and hang forever. while let Poll::Ready(Some(_)) = block_downloads.as_mut().poll_next(cx) {} result = Ok(()); Setup::Initialized { - address_book, + cached_peer_addr_response, block_downloads, mempool, state, @@ -343,13 +356,13 @@ impl Service<zn::Request> for Inbound { /// and will cause callers to disconnect from the remote peer. #[instrument(name = "inbound", skip(self, req))] fn call(&mut self, req: zn::Request) -> Self::Future { - let (address_book, block_downloads, mempool, state) = match &mut self.setup { + let (cached_peer_addr_response, block_downloads, mempool, state) = match &mut self.setup { Setup::Initialized { - address_book, + cached_peer_addr_response, block_downloads, mempool, state, - } => (address_book, block_downloads, mempool, state), + } => (cached_peer_addr_response, block_downloads, mempool, state), _ => { debug!("ignoring request from remote peer during setup"); return async { Ok(zn::Response::Nil) }.boxed(); @@ -366,33 +379,13 @@ impl Service<zn::Request> for Inbound { // // # Correctness // - // Briefly hold the address book threaded mutex while - // cloning the address book. Then sanitize in the future, - // after releasing the lock. - let peers = address_book.lock().unwrap().clone(); + // If the address book is busy, try again inside the future. If it can't be locked + // twice, ignore the request.
+ cached_peer_addr_response.try_refresh(); + let response = cached_peer_addr_response.value(); async move { - // Correctness: get the current time after acquiring the address book lock. - // - // This time is used to filter outdated peers, so it doesn't really matter - // if we get it when the future is created, or when it starts running. - let now = Utc::now(); - - // Send a sanitized response - let mut peers = peers.sanitized(now); - - // Truncate the list - let address_limit = div_ceil(peers.len(), ADDR_RESPONSE_LIMIT_DENOMINATOR); - let address_limit = MAX_ADDRS_IN_MESSAGE.min(address_limit); - peers.truncate(address_limit); - - if peers.is_empty() { - // We don't know if the peer response will be empty until we've sanitized them. - debug!("ignoring `Peers` request from remote peer because our address book is empty"); - Ok(zn::Response::Nil) - } else { - Ok(zn::Response::Peers(peers)) - } + Ok(response) }.boxed() } zn::Request::BlocksByHash(hashes) => { diff --git a/zebrad/src/components/inbound/cached_peer_addr_response.rs b/zebrad/src/components/inbound/cached_peer_addr_response.rs new file mode 100644 index 00000000000..8e95f16f89e --- /dev/null +++ b/zebrad/src/components/inbound/cached_peer_addr_response.rs @@ -0,0 +1,98 @@ +//! Periodically-refreshed GetAddr response for the inbound service. +//! +//! Used to avoid giving out Zebra's entire address book over a short duration. + +use std::{ + sync::{Mutex, TryLockError}, + time::Instant, +}; + +use super::*; + +/// The minimum duration that a `CachedPeerAddrResponse` is considered fresh before the inbound service +/// should get new peer addresses from the address book to send as a `GetAddr` response. +/// +/// Cached responses are considered stale and should be cleared after twice this duration. +pub const CACHED_ADDRS_REFRESH_INTERVAL: Duration = Duration::from_secs(10 * 60); + +/// Caches and refreshes a partial list of peer addresses to be returned as a `GetAddr` response. +pub struct CachedPeerAddrResponse { + /// A shared list of peer addresses. + address_book: Arc<Mutex<AddressBook>>, + + /// An owned list of peer addresses used as a `GetAddr` response. + value: zn::Response, + + /// Instant after which `cached_addrs` should be refreshed. + refresh_time: Instant, +} + +impl CachedPeerAddrResponse { + /// Creates a new empty [`CachedPeerAddrResponse`]. + pub(super) fn new(address_book: Arc<Mutex<AddressBook>>) -> Self { + Self { + address_book, + value: zn::Response::Nil, + refresh_time: Instant::now(), + } + } + + pub(super) fn value(&self) -> zn::Response { + self.value.clone() + } + + /// Refreshes the `cached_addrs` if the time is past `refresh_time` or the cache is empty. + pub(super) fn try_refresh(&mut self) { + let now = Instant::now(); + + // return early if there are some cached addresses, and they are still fresh + if now < self.refresh_time { + return; + } + + let cache_expiry = self.refresh_time + CACHED_ADDRS_REFRESH_INTERVAL; + + // try getting a lock on the address book if it's time to refresh the cached addresses + match self + .address_book + .try_lock() + .map(|book| book.fresh_get_addr_response()) + { + // Update cached value and refresh_time if there are some gossipable peers in the address book. + // + // Security: this avoids outdated gossiped peers. Outdated Zebra binaries will gradually lose all their peers, + // because those peers refuse to connect to outdated versions. So we don't want those outdated Zebra + // versions to keep gossiping old peer information either.
+ Ok(peers) if !peers.is_empty() => { + self.refresh_time = now + CACHED_ADDRS_REFRESH_INTERVAL; + self.value = zn::Response::Peers(peers); + } + + // Clear the cached response if the time is past the cache expiry time. + Ok(_) if now > cache_expiry => { + self.value = zn::Response::Nil; + } + + Err(TryLockError::WouldBlock) if now > cache_expiry => { + warn!("getaddrs response hasn't been refreshed in some time"); + self.value = zn::Response::Nil; + } + + // Don't update the cached response or refresh time if unable to get new peer addresses + // from the address book and `now` is before the cache expiry. + Ok(_) => { + debug!( + "could not refresh cached response because our address \ + book has no available peers" + ); + } + + Err(TryLockError::WouldBlock) => {} + + // Panic if the address book lock is poisoned + Err(TryLockError::Poisoned(_)) => { + panic!("previous thread panicked while holding the address book lock") + } + }; + } +} diff --git a/zebrad/src/components/inbound/downloads.rs b/zebrad/src/components/inbound/downloads.rs index 11200e66435..a1a36ed26d5 100644 --- a/zebrad/src/components/inbound/downloads.rs +++ b/zebrad/src/components/inbound/downloads.rs @@ -213,8 +213,8 @@ where "block hash already queued for inbound download: ignored block", ); - metrics::gauge!("gossip.queued.block.count", self.pending.len() as f64); - metrics::counter!("gossip.already.queued.dropped.block.hash.count", 1); + metrics::gauge!("gossip.queued.block.count").set(self.pending.len() as f64); + metrics::counter!("gossip.already.queued.dropped.block.hash.count").increment(1); return DownloadAction::AlreadyQueued; } @@ -227,8 +227,8 @@ where "too many blocks queued for inbound download: ignored block", ); - metrics::gauge!("gossip.queued.block.count", self.pending.len() as f64); - metrics::counter!("gossip.full.queue.dropped.block.hash.count", 1); + metrics::gauge!("gossip.queued.block.count").set(self.pending.len() as f64); + metrics::counter!("gossip.full.queue.dropped.block.hash.count").increment(1); return DownloadAction::FullQueue; } @@ -271,7 +271,7 @@ where } else { unreachable!("wrong response to block request"); }; - metrics::counter!("gossip.downloaded.block.count", 1); + metrics::counter!("gossip.downloaded.block.count").increment(1); // # Security & Performance // @@ -312,7 +312,7 @@ where ?hash, "gossiped block with no height: dropped downloaded block" ); - metrics::counter!("gossip.no.height.dropped.block.count", 1); + metrics::counter!("gossip.no.height.dropped.block.count").increment(1); BoxError::from("gossiped block with no height") })?; @@ -326,7 +326,7 @@ where lookahead_limit = full_verify_concurrency_limit, "gossiped block height too far ahead of the tip: dropped downloaded block", ); - metrics::counter!("gossip.max.height.limit.dropped.block.count", 1); + metrics::counter!("gossip.max.height.limit.dropped.block.count").increment(1); Err("gossiped block height too far ahead")?; } else if block_height < min_accepted_height { @@ -338,7 +338,7 @@ where behind_tip_limit = ?zs::MAX_BLOCK_REORG_HEIGHT, "gossiped block height behind the finalized tip: dropped downloaded block", ); - metrics::counter!("gossip.min.height.limit.dropped.block.count", 1); + metrics::counter!("gossip.min.height.limit.dropped.block.count").increment(1); Err("gossiped block height behind the finalized tip")?; } @@ -350,7 +350,7 @@ where } .map_ok(|(hash, height)| { info!(?height, "downloaded and verified gossiped block"); - metrics::counter!("gossip.verified.block.count", 1); +
metrics::counter!("gossip.verified.block.count").increment(1); hash }) // Tack the hash onto the error so we can remove the cancel handle @@ -364,7 +364,7 @@ where biased; _ = &mut cancel_rx => { trace!("task cancelled prior to completion"); - metrics::counter!("gossip.cancelled.count", 1); + metrics::counter!("gossip.cancelled.count").increment(1); Err(("canceled".into(), hash)) } verification = fut => verification, @@ -383,7 +383,7 @@ where concurrency_limit = self.full_verify_concurrency_limit, "queued hash for download", ); - metrics::gauge!("gossip.queued.block.count", self.pending.len() as f64); + metrics::gauge!("gossip.queued.block.count").set(self.pending.len() as f64); DownloadAction::AddedToQueue } diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 1a383bb90b3..44f27bd80e2 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -19,12 +19,16 @@ use zebra_chain::{ block::{Block, Height}, fmt::humantime_seconds, parameters::Network::{self, *}, - serialization::ZcashDeserializeInto, + serialization::{DateTime32, ZcashDeserializeInto}, transaction::{UnminedTx, UnminedTxId, VerifiedUnminedTx}, }; use zebra_consensus::{error::TransactionError, transaction, Config as ConsensusConfig}; use zebra_network::{ - constants::DEFAULT_MAX_CONNS_PER_IP, AddressBook, InventoryResponse, Request, Response, + constants::{ + ADDR_RESPONSE_LIMIT_DENOMINATOR, DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, + }, + types::{MetaAddr, PeerServices}, + AddressBook, InventoryResponse, Request, Response, }; use zebra_node_services::mempool; use zebra_state::{ChainTipChange, Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; @@ -742,6 +746,112 @@ async fn inbound_block_height_lookahead_limit() -> Result<(), crate::BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +/// Checks that Zebra won't give out its entire address book over a short duration. +async fn caches_getaddr_response() { + const NUM_ADDRESSES: usize = 20; + const NUM_REQUESTS: usize = 10; + const EXPECTED_NUM_RESULTS: usize = NUM_ADDRESSES / ADDR_RESPONSE_LIMIT_DENOMINATOR; + + let _init_guard = zebra_test::init(); + + let addrs = (0..NUM_ADDRESSES) + .map(|idx| format!("127.0.0.{idx}:{idx}")) + .map(|addr| { + MetaAddr::new_gossiped_meta_addr( + addr.parse().unwrap(), + PeerServices::NODE_NETWORK, + DateTime32::now(), + ) + }); + + let inbound = { + let network = Mainnet; + let consensus_config = ConsensusConfig::default(); + let state_config = StateConfig::ephemeral(); + let address_book = AddressBook::new_with_addrs( + SocketAddr::from_str("0.0.0.0:0").unwrap(), + Mainnet, + DEFAULT_MAX_CONNS_PER_IP, + MAX_ADDRS_IN_ADDRESS_BOOK, + Span::none(), + addrs, + ); + + let address_book = Arc::new(std::sync::Mutex::new(address_book)); + + // UTXO verification doesn't matter for these tests. + let (state, _read_only_state_service, latest_chain_tip, _chain_tip_change) = + zebra_state::init(state_config.clone(), network, Height::MAX, 0); + + let state_service = ServiceBuilder::new().buffer(1).service(state); + + // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. 
+ let ( + block_verifier, + _transaction_verifier, + _groth16_download_handle, + _max_checkpoint_height, + ) = zebra_consensus::router::init(consensus_config.clone(), network, state_service.clone()) + .await; + + let peer_set = MockService::build() + .with_max_request_delay(MAX_PEER_SET_REQUEST_DELAY) + .for_unit_tests(); + let buffered_peer_set = Buffer::new(BoxService::new(peer_set.clone()), 10); + + let buffered_mempool_service = + Buffer::new(BoxService::new(MockService::build().for_unit_tests()), 10); + let (setup_tx, setup_rx) = oneshot::channel(); + + let inbound_service = ServiceBuilder::new() + .load_shed() + .service(Inbound::new(MAX_INBOUND_CONCURRENCY, setup_rx)); + let inbound_service = BoxService::new(inbound_service); + let inbound_service = ServiceBuilder::new().buffer(1).service(inbound_service); + + let setup_data = InboundSetupData { + address_book: address_book.clone(), + block_download_peer_set: buffered_peer_set, + block_verifier, + mempool: buffered_mempool_service.clone(), + state: state_service.clone(), + latest_chain_tip, + }; + let r = setup_tx.send(setup_data); + // We can't expect or unwrap because the returned Result does not implement Debug + assert!(r.is_ok(), "unexpected setup channel send failure"); + + inbound_service + }; + + let Ok(zebra_network::Response::Peers(first_result)) = + inbound.clone().oneshot(zebra_network::Request::Peers).await + else { + panic!("result should match Ok(Peers(_))") + }; + + assert_eq!( + first_result.len(), + EXPECTED_NUM_RESULTS, + "inbound service should respond with expected number of peer addresses", + ); + + for _ in 0..NUM_REQUESTS { + let Ok(zebra_network::Response::Peers(peers)) = + inbound.clone().oneshot(zebra_network::Request::Peers).await + else { + panic!("result should match Ok(Peers(_))") + }; + + assert_eq!( + peers, + first_result, + "inbound service should return the same result for every Peers request until the refresh time", + ); + } +} + /// Setup a fake Zebra network stack, with fake peer set. /// /// Adds some initial state blocks, and mempool transactions if `add_transactions` is true. @@ -787,13 +897,8 @@ async fn setup( // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. let (block_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - zebra_consensus::router::init( - consensus_config.clone(), - network, - state_service.clone(), - true, - ) - .await; + zebra_consensus::router::init(consensus_config.clone(), network, state_service.clone()) + .await; let mut peer_set = MockService::build() .with_max_request_delay(MAX_PEER_SET_REQUEST_DELAY) diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index ac773145966..0913d60274d 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -59,6 +59,9 @@ async fn inbound_peers_empty_address_book() -> Result<(), crate::BoxError> { listen_addr, ) = setup(None).await; + // yield and sleep until the address book lock is released. 
+ tokio::time::sleep(std::time::Duration::from_millis(100)).await; + // Send a request to inbound directly let request = inbound_service.clone().oneshot(Request::Peers); let response = request.await; diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index aef623e45fa..2d9b2b3e0c5 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -22,12 +22,13 @@ use std::{ collections::HashSet, future::Future, iter, - pin::Pin, + pin::{pin, Pin}, task::{Context, Poll}, }; use futures::{future::FutureExt, stream::Stream}; use tokio::sync::broadcast; +use tokio_stream::StreamExt; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service}; use zebra_chain::{ @@ -42,7 +43,7 @@ use zebra_node_services::mempool::{Gossip, Request, Response}; use zebra_state as zs; use zebra_state::{ChainTipChange, TipAction}; -use crate::components::sync::SyncStatus; +use crate::components::{mempool::crawler::RATE_LIMIT_DELAY, sync::SyncStatus}; pub mod config; mod crawler; @@ -580,9 +581,11 @@ impl Service for Mempool { let best_tip_height = self.latest_chain_tip.best_tip_height(); // Clean up completed download tasks and add to mempool if successful. - while let Poll::Ready(Some(r)) = tx_downloads.as_mut().poll_next(cx) { + while let Poll::Ready(Some(r)) = + pin!(tx_downloads.timeout(RATE_LIMIT_DELAY)).poll_next(cx) + { match r { - Ok((tx, expected_tip_height)) => { + Ok(Ok((tx, expected_tip_height))) => { // # Correctness: // // It's okay to use tip height here instead of the tip hash since @@ -609,12 +612,20 @@ impl Service for Mempool { tx_downloads.download_if_needed_and_verify(tx.transaction.into()); } } - Err((txid, error)) => { + Ok(Err((txid, error))) => { tracing::debug!(?txid, ?error, "mempool transaction failed to verify"); - metrics::counter!("mempool.failed.verify.tasks.total", 1, "reason" => error.to_string()); + metrics::counter!("mempool.failed.verify.tasks.total", "reason" => error.to_string()).increment(1); storage.reject_if_needed(txid, error); } + Err(_elapsed) => { + // A timeout happens when the stream hangs waiting for another service, + // so there is no specific transaction ID. + + tracing::info!("mempool transaction failed to verify due to timeout"); + + metrics::counter!("mempool.failed.verify.tasks.total", "reason" => "timeout").increment(1); + } }; } diff --git a/zebrad/src/components/mempool/config.rs b/zebrad/src/components/mempool/config.rs index 2b4f20f6a1b..7f2163f376f 100644 --- a/zebrad/src/components/mempool/config.rs +++ b/zebrad/src/components/mempool/config.rs @@ -5,7 +5,7 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; /// Mempool configuration section. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields, default)] pub struct Config { /// The mempool transaction cost limit. 
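The nested `Ok(Ok(..))` / `Ok(Err(..))` / `Err(_elapsed)` arms in the `zebrad/src/components/mempool.rs` hunk above come from `tokio_stream::StreamExt::timeout`, which wraps every item the stream yields in a `Result` with a timeout error. A minimal, self-contained sketch of the pattern (the item values and duration are illustrative only):

```rust
use std::{pin::pin, time::Duration};

use tokio_stream::StreamExt; // provides `timeout()` on streams

#[tokio::main]
async fn main() {
    // Stand-in for `tx_downloads`: any stream of completed items.
    let items = tokio_stream::iter(["tx1", "tx2", "tx3"]);

    // `timeout()` wraps each yielded item in `Result<_, Elapsed>`, and the
    // wrapped stream is not `Unpin`, so it must be pinned before polling.
    // This is why the mempool loop pins the wrapped stream and matches the
    // nested `Ok(Ok(..))`, `Ok(Err(..))`, and `Err(_elapsed)` cases.
    let mut stream = pin!(items.timeout(Duration::from_secs(73)));

    while let Some(result) = stream.next().await {
        match result {
            Ok(item) => println!("item completed: {item}"),
            Err(elapsed) => println!("stream stalled: {elapsed:?}"),
        }
    }
}
```

In the real service the inner items are themselves `Result`s from transaction verification, which produces the three-way match in the diff.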
diff --git a/zebrad/src/components/mempool/crawler.rs b/zebrad/src/components/mempool/crawler.rs index 610691f79ee..6d1129e2b30 100644 --- a/zebrad/src/components/mempool/crawler.rs +++ b/zebrad/src/components/mempool/crawler.rs @@ -50,7 +50,11 @@ use std::{collections::HashSet, time::Duration}; use futures::{future, pin_mut, stream::FuturesUnordered, StreamExt}; -use tokio::{sync::watch, task::JoinHandle, time::sleep}; +use tokio::{ + sync::watch, + task::JoinHandle, + time::{sleep, timeout}, +}; use tower::{timeout::Timeout, BoxError, Service, ServiceExt}; use tracing_futures::Instrument; @@ -77,7 +81,7 @@ const FANOUT: usize = 3; /// /// Using a prime number makes sure that mempool crawler fanouts /// don't synchronise with other crawls. -const RATE_LIMIT_DELAY: Duration = Duration::from_secs(73); +pub const RATE_LIMIT_DELAY: Duration = Duration::from_secs(73); /// The time to wait for a peer response. /// @@ -191,7 +195,14 @@ where loop { self.wait_until_enabled().await?; - self.crawl_transactions().await?; + // Avoid hangs when the peer service is not ready, or due to bugs in async code. + timeout(RATE_LIMIT_DELAY, self.crawl_transactions()) + .await + .unwrap_or_else(|timeout| { + // Temporary errors just get logged and ignored. + info!("mempool crawl timed out: {timeout:?}"); + Ok(()) + })?; sleep(RATE_LIMIT_DELAY).await; } } diff --git a/zebrad/src/components/mempool/crawler/tests/timing.rs b/zebrad/src/components/mempool/crawler/tests/timing.rs index 381d6638806..e0f58a431b9 100644 --- a/zebrad/src/components/mempool/crawler/tests/timing.rs +++ b/zebrad/src/components/mempool/crawler/tests/timing.rs @@ -1,7 +1,5 @@ //! Timing tests for the mempool crawler. -use std::convert::TryInto; - use zebra_chain::parameters::POST_BLOSSOM_POW_TARGET_SPACING; use zebra_network::constants::{DEFAULT_CRAWL_NEW_PEER_INTERVAL, HANDSHAKE_TIMEOUT}; @@ -10,10 +8,7 @@ use crate::components::mempool::crawler::RATE_LIMIT_DELAY; #[test] fn ensure_timing_consistent() { assert!( - RATE_LIMIT_DELAY.as_secs() - < POST_BLOSSOM_POW_TARGET_SPACING - .try_into() - .expect("not negative"), + RATE_LIMIT_DELAY.as_secs() < POST_BLOSSOM_POW_TARGET_SPACING.into(), "a mempool crawl should complete before most new blocks" ); diff --git a/zebrad/src/components/mempool/downloads.rs b/zebrad/src/components/mempool/downloads.rs index 33e5d975d90..d3f62b4087b 100644 --- a/zebrad/src/components/mempool/downloads.rs +++ b/zebrad/src/components/mempool/downloads.rs @@ -253,10 +253,8 @@ where ?MAX_INBOUND_CONCURRENCY, "transaction id already queued for inbound download: ignored transaction" ); - metrics::gauge!( - "mempool.currently.queued.transactions", - self.pending.len() as f64, - ); + metrics::gauge!("mempool.currently.queued.transactions",) + .set(self.pending.len() as f64); return Err(MempoolError::AlreadyQueued); } @@ -268,10 +266,8 @@ where ?MAX_INBOUND_CONCURRENCY, "too many transactions queued for inbound download: ignored transaction" ); - metrics::gauge!( - "mempool.currently.queued.transactions", - self.pending.len() as f64, - ); + metrics::gauge!("mempool.currently.queued.transactions",) + .set(self.pending.len() as f64); return Err(MempoolError::FullQueue); } @@ -327,17 +323,15 @@ where metrics::counter!( "mempool.downloaded.transactions.total", - 1, "version" => format!("{}",tx.transaction.version()), - ); + ).increment(1); tx } Gossip::Tx(tx) => { metrics::counter!( "mempool.pushed.transactions.total", - 1, "version" => format!("{}",tx.transaction.version()), - ); + ).increment(1); tx } }; @@ -363,9 +357,8 @@ 
where .map_ok(|(tx, tip_height)| { metrics::counter!( "mempool.verified.transactions.total", - 1, "version" => format!("{}", tx.transaction.transaction.version()), - ); + ).increment(1); (tx, tip_height) }) // Tack the hash onto the error so we can remove the cancel handle @@ -384,7 +377,7 @@ where biased; _ = &mut cancel_rx => { trace!("task cancelled prior to completion"); - metrics::counter!("mempool.cancelled.verify.tasks.total", 1); + metrics::counter!("mempool.cancelled.verify.tasks.total").increment(1); Err((TransactionDownloadVerifyError::Cancelled, txid)) } verification = fut => verification, @@ -405,11 +398,8 @@ where ?MAX_INBOUND_CONCURRENCY, "queued transaction hash for download" ); - metrics::gauge!( - "mempool.currently.queued.transactions", - self.pending.len() as f64, - ); - metrics::counter!("mempool.queued.transactions.total", 1); + metrics::gauge!("mempool.currently.queued.transactions",).set(self.pending.len() as f64); + metrics::counter!("mempool.queued.transactions.total").increment(1); Ok(()) } @@ -446,10 +436,7 @@ where } assert!(self.pending.is_empty()); assert!(self.cancel_handles.is_empty()); - metrics::gauge!( - "mempool.currently.queued.transactions", - self.pending.len() as f64, - ); + metrics::gauge!("mempool.currently.queued.transactions",).set(self.pending.len() as f64); } /// Get the number of currently in-flight download tasks. @@ -498,6 +485,6 @@ where fn drop(mut self: Pin<&mut Self>) { self.cancel_all(); - metrics::gauge!("mempool.currently.queued.transactions", 0 as f64); + metrics::gauge!("mempool.currently.queued.transactions").set(0 as f64); } } diff --git a/zebrad/src/components/mempool/gossip.rs b/zebrad/src/components/mempool/gossip.rs index 6d3b2b638bf..4fee90d85a9 100644 --- a/zebrad/src/components/mempool/gossip.rs +++ b/zebrad/src/components/mempool/gossip.rs @@ -93,7 +93,7 @@ where // broadcast requests don't return errors, and we'd just want to ignore them anyway let _ = broadcast_network.ready().await?.call(request).await; - metrics::counter!("mempool.gossiped.transactions.total", txs_len as u64); + metrics::counter!("mempool.gossiped.transactions.total").increment(txs_len as u64); // wait for at least the network timeout between gossips // diff --git a/zebrad/src/components/mempool/storage.rs b/zebrad/src/components/mempool/storage.rs index bed0f9aabd2..90b114fffe1 100644 --- a/zebrad/src/components/mempool/storage.rs +++ b/zebrad/src/components/mempool/storage.rs @@ -651,16 +651,12 @@ impl Storage { /// /// Must be called every time the rejected lists change. fn update_rejected_metrics(&mut self) { - metrics::gauge!( - "mempool.rejected.transaction.ids", - self.rejected_transaction_count() as f64, - ); + metrics::gauge!("mempool.rejected.transaction.ids",) + .set(self.rejected_transaction_count() as f64); // This is just an approximation. 
// TODO: make it more accurate #2869 let item_size = size_of::<(transaction::Hash, SameEffectsTipRejectionError)>(); - metrics::gauge!( - "mempool.rejected.transaction.ids.bytes", - (self.rejected_transaction_count() * item_size) as f64, - ); + metrics::gauge!("mempool.rejected.transaction.ids.bytes",) + .set((self.rejected_transaction_count() * item_size) as f64); } } diff --git a/zebrad/src/components/mempool/storage/verified_set.rs b/zebrad/src/components/mempool/storage/verified_set.rs index 1d1f835fb36..a9c850b4ef8 100644 --- a/zebrad/src/components/mempool/storage/verified_set.rs +++ b/zebrad/src/components/mempool/storage/verified_set.rs @@ -337,63 +337,57 @@ impl VerifiedSet { metrics::gauge!( "zcash.mempool.actions.unpaid", - unpaid_actions_with_weight_lt20pct as f64, "bk" => "< 0.2", - ); + ) + .set(unpaid_actions_with_weight_lt20pct as f64); metrics::gauge!( "zcash.mempool.actions.unpaid", - unpaid_actions_with_weight_lt40pct as f64, "bk" => "< 0.4", - ); + ) + .set(unpaid_actions_with_weight_lt40pct as f64); metrics::gauge!( "zcash.mempool.actions.unpaid", - unpaid_actions_with_weight_lt60pct as f64, "bk" => "< 0.6", - ); + ) + .set(unpaid_actions_with_weight_lt60pct as f64); metrics::gauge!( "zcash.mempool.actions.unpaid", - unpaid_actions_with_weight_lt80pct as f64, "bk" => "< 0.8", - ); + ) + .set(unpaid_actions_with_weight_lt80pct as f64); metrics::gauge!( "zcash.mempool.actions.unpaid", - unpaid_actions_with_weight_lt1 as f64, "bk" => "< 1", - ); - metrics::gauge!("zcash.mempool.actions.paid", paid_actions as f64); - metrics::gauge!( - "zcash.mempool.size.transactions", - self.transaction_count() as f64, - ); + ) + .set(unpaid_actions_with_weight_lt1 as f64); + metrics::gauge!("zcash.mempool.actions.paid").set(paid_actions as f64); + metrics::gauge!("zcash.mempool.size.transactions",).set(self.transaction_count() as f64); metrics::gauge!( "zcash.mempool.size.weighted", - size_with_weight_lt1 as f64, "bk" => "< 1", - ); + ) + .set(size_with_weight_lt1 as f64); metrics::gauge!( "zcash.mempool.size.weighted", - size_with_weight_eq1 as f64, "bk" => "1", - ); + ) + .set(size_with_weight_eq1 as f64); metrics::gauge!( "zcash.mempool.size.weighted", - size_with_weight_gt1 as f64, "bk" => "> 1", - ); + ) + .set(size_with_weight_gt1 as f64); metrics::gauge!( "zcash.mempool.size.weighted", - size_with_weight_gt2 as f64, "bk" => "> 2", - ); + ) + .set(size_with_weight_gt2 as f64); metrics::gauge!( "zcash.mempool.size.weighted", - size_with_weight_gt3 as f64, "bk" => "> 3", - ); - metrics::gauge!( - "zcash.mempool.size.bytes", - self.transactions_serialized_size as f64, - ); - metrics::gauge!("zcash.mempool.cost.bytes", self.total_cost as f64); + ) + .set(size_with_weight_gt3 as f64); + metrics::gauge!("zcash.mempool.size.bytes",).set(self.transactions_serialized_size as f64); + metrics::gauge!("zcash.mempool.cost.bytes").set(self.total_cost as f64); } } diff --git a/zebrad/src/components/metrics.rs b/zebrad/src/components/metrics.rs index fd41e3699d3..5527d37747c 100644 --- a/zebrad/src/components/metrics.rs +++ b/zebrad/src/components/metrics.rs @@ -27,10 +27,11 @@ impl MetricsEndpoint { // Expose binary metadata to metrics, using a single time series with // value 1: // https://www.robustperception.io/exposing-the-software-version-to-prometheus - metrics::increment_counter!( + metrics::counter!( format!("{}.build.info", env!("CARGO_PKG_NAME")), "version" => env!("CARGO_PKG_VERSION") - ); + ) + .increment(1); } Err(e) => panic!( "Opening metrics endpoint listener {addr:?} failed: 
{e:?}. \
@@ -59,7 +60,7 @@ impl MetricsEndpoint {
 }
 
 /// Metrics configuration section.
-#[derive(Clone, Debug, Deserialize, Serialize)]
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
 #[serde(deny_unknown_fields, default)]
 pub struct Config {
     /// The address used for the Prometheus metrics endpoint.
diff --git a/zebrad/src/components/miner.rs b/zebrad/src/components/miner.rs
new file mode 100644
index 00000000000..ace2ffe5e83
--- /dev/null
+++ b/zebrad/src/components/miner.rs
@@ -0,0 +1,556 @@
+//! Internal mining in Zebra.
+//!
+//! # TODO
+//! - pause mining if we have no peers, like `zcashd` does,
+//!   and add a developer config that mines regardless of how many peers we have.
+//!
+//! - move common code into zebra-chain or zebra-node-services and remove the RPC dependency.
+
+use std::{cmp::min, sync::Arc, thread::available_parallelism, time::Duration};
+
+use color_eyre::Report;
+use futures::{stream::FuturesUnordered, StreamExt};
+use thread_priority::{ThreadBuilder, ThreadPriority};
+use tokio::{select, sync::watch, task::JoinHandle, time::sleep};
+use tower::Service;
+use tracing::{Instrument, Span};
+
+use zebra_chain::{
+    block::{self, Block},
+    chain_sync_status::ChainSyncStatus,
+    chain_tip::ChainTip,
+    diagnostic::task::WaitForPanics,
+    serialization::{AtLeastOne, ZcashSerialize},
+    shutdown::is_shutting_down,
+    work::equihash::{Solution, SolverCancelled},
+};
+use zebra_network::AddressBookPeers;
+use zebra_node_services::mempool;
+use zebra_rpc::{
+    config::mining::Config,
+    methods::{
+        get_block_template_rpcs::{
+            get_block_template::{
+                self, proposal::TimeSource, proposal_block_from_template,
+                GetBlockTemplateCapability::*, GetBlockTemplateRequestMode::*,
+            },
+            types::hex_data::HexData,
+        },
+        GetBlockTemplateRpc, GetBlockTemplateRpcImpl,
+    },
+};
+use zebra_state::WatchReceiver;
+
+/// The amount of time we wait between block template retries.
+pub const BLOCK_TEMPLATE_WAIT_TIME: Duration = Duration::from_secs(20);
+
+/// A rate-limit for block template refreshes.
+pub const BLOCK_TEMPLATE_REFRESH_LIMIT: Duration = Duration::from_secs(2);
+
+/// How long we wait after mining a block, before expecting a new template.
+///
+/// This should be slightly longer than `BLOCK_TEMPLATE_REFRESH_LIMIT` to allow for template
+/// generation.
+pub const BLOCK_MINING_WAIT_TIME: Duration = Duration::from_secs(3);
+
+/// Initialize the miner based on its config, and spawn a task for it.
+///
+/// This method is CPU and memory-intensive. It uses 144 MB of RAM and one CPU core per configured
+/// mining thread.
+///
+/// See [`run_mining_solver()`] for more details.
+pub fn spawn_init<Mempool, State, Tip, BlockVerifierRouter, SyncStatus, AddressBook>(
+    config: &Config,
+    rpc: GetBlockTemplateRpcImpl<Mempool, State, Tip, BlockVerifierRouter, SyncStatus, AddressBook>,
+) -> JoinHandle<Result<(), Report>>
+// TODO: simplify or avoid repeating these generics (how?)
+where
+    Mempool: Service<
+            mempool::Request,
+            Response = mempool::Response,
+            Error = zebra_node_services::BoxError,
+        > + Clone
+        + Send
+        + Sync
+        + 'static,
+    Mempool::Future: Send,
+    State: Service<
+            zebra_state::ReadRequest,
+            Response = zebra_state::ReadResponse,
+            Error = zebra_state::BoxError,
+        > + Clone
+        + Send
+        + Sync
+        + 'static,
+    <State as Service<zebra_state::ReadRequest>>::Future: Send,
+    Tip: ChainTip + Clone + Send + Sync + 'static,
+    BlockVerifierRouter: Service<
+            zebra_consensus::Request,
+            Response = block::Hash,
+            Error = zebra_consensus::BoxError,
+        > + Clone
+        + Send
+        + Sync
+        + 'static,
+    <BlockVerifierRouter as Service<zebra_consensus::Request>>::Future: Send,
+    SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
+    AddressBook: AddressBookPeers + Clone + Send + Sync + 'static,
+{
+    let config = config.clone();
+
+    // TODO: spawn an entirely new executor here, so mining is isolated from higher priority tasks.
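+    // Until then, the miner future runs on the shared runtime; the CPU-heavy equihash
+    // solving is offloaded to low-priority blocking threads in `mine_a_block()`.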
+ tokio::spawn(init(config, rpc).in_current_span()) +} + +/// Initialize the miner based on its config. +/// +/// This method is CPU and memory-intensive. It uses 144 MB of RAM and one CPU core per configured +/// mining thread. +/// +/// See [`run_mining_solver()`] for more details. +pub async fn init( + _config: Config, + rpc: GetBlockTemplateRpcImpl, +) -> Result<(), Report> +where + Mempool: Service< + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, + State: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + >::Future: Send, + Tip: ChainTip + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, +{ + // TODO: change this to `config.internal_miner_threads` when internal miner feature is added back. + // https://github.com/ZcashFoundation/zebra/issues/8183 + let configured_threads = 1; + // If we can't detect the number of cores, use the configured number. + let available_threads = available_parallelism() + .map(usize::from) + .unwrap_or(configured_threads); + + // Use the minimum of the configured and available threads. + let solver_count = min(configured_threads, available_threads); + + info!( + ?solver_count, + "launching mining tasks with parallel solvers" + ); + + let (template_sender, template_receiver) = watch::channel(None); + let template_receiver = WatchReceiver::new(template_receiver); + + // Spawn these tasks, to avoid blocked cooperative futures, and improve shutdown responsiveness. + // This is particularly important when there are a large number of solver threads. + let mut abort_handles = Vec::new(); + + let template_generator = tokio::task::spawn( + generate_block_templates(rpc.clone(), template_sender).in_current_span(), + ); + abort_handles.push(template_generator.abort_handle()); + let template_generator = template_generator.wait_for_panics(); + + let mut mining_solvers = FuturesUnordered::new(); + for solver_id in 0..solver_count { + // Assume there are less than 256 cores. If there are more, only run 256 tasks. + let solver_id = min(solver_id, usize::from(u8::MAX)) + .try_into() + .expect("just limited to u8::MAX"); + + let solver = tokio::task::spawn( + run_mining_solver(solver_id, template_receiver.clone(), rpc.clone()).in_current_span(), + ); + abort_handles.push(solver.abort_handle()); + + mining_solvers.push(solver.wait_for_panics()); + } + + // These tasks run forever unless there is a fatal error or shutdown. + // When that happens, the first task to error returns, and the other JoinHandle futures are + // cancelled. + let first_result; + select! { + result = template_generator => { first_result = result; } + result = mining_solvers.next() => { + first_result = result + .expect("stream never terminates because there is at least one solver task"); + } + } + + // But the spawned async tasks keep running, so we need to abort them here. + for abort_handle in abort_handles { + abort_handle.abort(); + } + + // Any spawned blocking threads will keep running. When this task returns and drops the + // `template_sender`, it cancels all the spawned miner threads. 
This works because we've + // aborted the `template_generator` task, which owns the `template_sender`. (And it doesn't + // spawn any blocking threads.) + first_result +} + +/// Generates block templates using `rpc`, and sends them to mining threads using `template_sender`. +#[instrument(skip(rpc, template_sender))] +pub async fn generate_block_templates< + Mempool, + State, + Tip, + BlockVerifierRouter, + SyncStatus, + AddressBook, +>( + rpc: GetBlockTemplateRpcImpl, + template_sender: watch::Sender>>, +) -> Result<(), Report> +where + Mempool: Service< + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, + State: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + >::Future: Send, + Tip: ChainTip + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, +{ + // Pass the correct arguments, even if Zebra currently ignores them. + let mut parameters = get_block_template::JsonParameters { + mode: Template, + data: None, + capabilities: vec![LongPoll, CoinbaseTxn], + long_poll_id: None, + _work_id: None, + }; + + // Shut down the task when all the template receivers are dropped, or Zebra shuts down. + while !template_sender.is_closed() && !is_shutting_down() { + let template = rpc.get_block_template(Some(parameters.clone())).await; + + // Wait for the chain to sync so we get a valid template. + let Ok(template) = template else { + debug!( + ?BLOCK_TEMPLATE_WAIT_TIME, + "waiting for a valid block template", + ); + + // Skip the wait if we got an error because we are shutting down. + if !is_shutting_down() { + sleep(BLOCK_TEMPLATE_WAIT_TIME).await; + } + + continue; + }; + + // Convert from RPC GetBlockTemplate to Block + let template = template + .try_into_template() + .expect("invalid RPC response: proposal in response to a template request"); + + info!( + height = ?template.height, + transactions = ?template.transactions.len(), + "mining with an updated block template", + ); + + // Tell the next get_block_template() call to wait until the template has changed. + parameters.long_poll_id = Some(template.long_poll_id); + + let block = proposal_block_from_template(&template, TimeSource::CurTime) + .expect("unexpected invalid block template"); + + // If the template has actually changed, send an updated template. + template_sender.send_if_modified(|old_block| { + if old_block.as_ref().map(|b| *b.header) == Some(*block.header) { + return false; + } + *old_block = Some(Arc::new(block)); + true + }); + + // If the blockchain is changing rapidly, limit how often we'll update the template. + // But if we're shutting down, do that immediately. + if !template_sender.is_closed() && !is_shutting_down() { + sleep(BLOCK_TEMPLATE_REFRESH_LIMIT).await; + } + } + + Ok(()) +} + +/// Runs a single mining thread that gets blocks from the `template_receiver`, calculates equihash +/// solutions with nonces based on `solver_id`, and submits valid blocks to Zebra's block validator. +/// +/// This method is CPU and memory-intensive. It uses 144 MB of RAM and one CPU core while running. +/// It can run for minutes or hours if the network difficulty is high. Mining uses a thread with +/// low CPU priority. 
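+///
+/// Each solver thread searches a distinct nonce range: `mine_a_block()` writes its
+/// `solver_id` into the first and last bytes of the block header nonce before solving.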
+#[instrument(skip(template_receiver, rpc))] +pub async fn run_mining_solver( + solver_id: u8, + mut template_receiver: WatchReceiver>>, + rpc: GetBlockTemplateRpcImpl, +) -> Result<(), Report> +where + Mempool: Service< + mempool::Request, + Response = mempool::Response, + Error = zebra_node_services::BoxError, + > + Clone + + Send + + Sync + + 'static, + Mempool::Future: Send, + State: Service< + zebra_state::ReadRequest, + Response = zebra_state::ReadResponse, + Error = zebra_state::BoxError, + > + Clone + + Send + + Sync + + 'static, + >::Future: Send, + Tip: ChainTip + Clone + Send + Sync + 'static, + BlockVerifierRouter: Service + + Clone + + Send + + Sync + + 'static, + >::Future: Send, + SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, + AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, +{ + // Shut down the task when the template sender is dropped, or Zebra shuts down. + while template_receiver.has_changed().is_ok() && !is_shutting_down() { + // Get the latest block template, and mark the current value as seen. + // We mark the value first to avoid missed updates. + template_receiver.mark_as_seen(); + let template = template_receiver.cloned_watch_data(); + + let Some(template) = template else { + if solver_id == 0 { + info!( + ?solver_id, + ?BLOCK_TEMPLATE_WAIT_TIME, + "solver waiting for initial block template" + ); + } else { + debug!( + ?solver_id, + ?BLOCK_TEMPLATE_WAIT_TIME, + "solver waiting for initial block template" + ); + } + + // Skip the wait if we didn't get a template because we are shutting down. + if !is_shutting_down() { + sleep(BLOCK_TEMPLATE_WAIT_TIME).await; + } + + continue; + }; + + let height = template.coinbase_height().expect("template is valid"); + + // Set up the cancellation conditions for the miner. + let mut cancel_receiver = template_receiver.clone(); + let old_header = *template.header; + let cancel_fn = move || match cancel_receiver.has_changed() { + // Guard against get_block_template() providing an identical header. This could happen + // if something irrelevant to the block data changes, the time was within 1 second, or + // there is a spurious channel change. + Ok(has_changed) => { + cancel_receiver.mark_as_seen(); + + // We only need to check header equality, because the block data is bound to the + // header. + if has_changed + && Some(old_header) != cancel_receiver.cloned_watch_data().map(|b| *b.header) + { + Err(SolverCancelled) + } else { + Ok(()) + } + } + // If the sender was dropped, we're likely shutting down, so cancel the solver. + Err(_sender_dropped) => Err(SolverCancelled), + }; + + // Mine at least one block using the equihash solver. + let Ok(blocks) = mine_a_block(solver_id, template, cancel_fn).await else { + // If the solver was cancelled, we're either shutting down, or we have a new template. + if solver_id == 0 { + info!( + ?height, + ?solver_id, + new_template = ?template_receiver.has_changed(), + shutting_down = ?is_shutting_down(), + "solver cancelled: getting a new block template or shutting down" + ); + } else { + debug!( + ?height, + ?solver_id, + new_template = ?template_receiver.has_changed(), + shutting_down = ?is_shutting_down(), + "solver cancelled: getting a new block template or shutting down" + ); + } + + // If the blockchain is changing rapidly, limit how often we'll update the template. + // But if we're shutting down, do that immediately. 
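+            // (`has_changed()` only returns `Err` after the template sender has been
+            // dropped, which happens when Zebra is shutting down.)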
+            if template_receiver.has_changed().is_ok() && !is_shutting_down() {
+                sleep(BLOCK_TEMPLATE_REFRESH_LIMIT).await;
+            }
+
+            continue;
+        };
+
+        // Submit the newly mined blocks to the verifiers.
+        //
+        // TODO: if there is a new template (`cancel_fn().is_err()`), and
+        // GetBlockTemplate.submit_old is false, return immediately, and skip submitting the
+        // blocks.
+        let mut any_success = false;
+        for block in blocks {
+            let data = block
+                .zcash_serialize_to_vec()
+                .expect("serializing to Vec never fails");
+
+            match rpc.submit_block(HexData(data), None).await {
+                Ok(success) => {
+                    info!(
+                        ?height,
+                        hash = ?block.hash(),
+                        ?solver_id,
+                        ?success,
+                        "successfully mined a new block",
+                    );
+                    any_success = true;
+                }
+                Err(error) => info!(
+                    ?height,
+                    hash = ?block.hash(),
+                    ?solver_id,
+                    ?error,
+                    "validating a newly mined block failed, trying again",
+                ),
+            }
+        }
+
+        // Start re-mining quickly after a failed solution.
+        // If there's a new template, we'll use it, otherwise the existing one is ok.
+        if !any_success {
+            // If the blockchain is changing rapidly, limit how often we'll update the template.
+            // But if we're shutting down, do that immediately.
+            if template_receiver.has_changed().is_ok() && !is_shutting_down() {
+                sleep(BLOCK_TEMPLATE_REFRESH_LIMIT).await;
+            }
+            continue;
+        }
+
+        // Wait for the new block to verify, and the RPC task to pick up a new template.
+        // But don't wait too long, we could have mined on a fork.
+        tokio::select! {
+            shutdown_result = template_receiver.changed() => shutdown_result?,
+            _ = sleep(BLOCK_MINING_WAIT_TIME) => {}
+        }
+    }
+
+    Ok(())
+}
+
+/// Mines one or more blocks based on `template`. Calculates equihash solutions, checks difficulty,
+/// and returns as soon as it has at least one block. Uses a different nonce range for each
+/// `solver_id`.
+///
+/// If `cancel_fn()` returns an error, returns early with `Err(SolverCancelled)`.
+///
+/// See [`run_mining_solver()`] for more details.
+pub async fn mine_a_block<F>(
+    solver_id: u8,
+    template: Arc<Block>,
+    cancel_fn: F,
+) -> Result<AtLeastOne<Block>, SolverCancelled>
+where
+    F: FnMut() -> Result<(), SolverCancelled> + Send + Sync + 'static,
+{
+    // TODO: Replace with Arc::unwrap_or_clone() when it stabilises:
+    // https://github.com/rust-lang/rust/issues/93610
+    let mut header = *template.header;
+
+    // Use a different nonce for each solver thread.
+    // Change both the first and last bytes, so we don't have to care if the nonces are incremented in
+    // big-endian or little-endian order. And we can see the thread that mined a block from the nonce.
+    *header.nonce.first_mut().unwrap() = solver_id;
+    *header.nonce.last_mut().unwrap() = solver_id;
+
+    // Mine one or more blocks using the solver, in a low-priority blocking thread.
+    let span = Span::current();
+    let solved_headers =
+        tokio::task::spawn_blocking(move || span.in_scope(move || {
+            let miner_thread_handle = ThreadBuilder::default().name("zebra-miner").priority(ThreadPriority::Min).spawn(move |priority_result| {
+                if let Err(error) = priority_result {
+                    info!(?error, "could not set miner to run at a low priority: running at default priority");
+                }
+
+                Solution::solve(header, cancel_fn)
+            }).expect("unable to spawn miner thread");
+
+            miner_thread_handle.wait_for_panics()
+        }))
+        .wait_for_panics()
+        .await?;
+
+    // Modify the template into solved blocks.
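+    // The solver returns one or more valid headers for the same template, so the
+    // template body is cloned once per solved header below.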
+
+    // TODO: Replace with Arc::unwrap_or_clone() when it stabilises
+    let block = (*template).clone();
+
+    let solved_blocks: Vec<Block> = solved_headers
+        .into_iter()
+        .map(|header| {
+            let mut block = block.clone();
+            block.header = Arc::new(header);
+            block
+        })
+        .collect();
+
+    Ok(solved_blocks
+        .try_into()
+        .expect("a 1:1 mapping of AtLeastOne produces at least one block"))
+}
diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs
index 9dc6da28589..651dacc1f8d 100644
--- a/zebrad/src/components/sync.rs
+++ b/zebrad/src/components/sync.rs
@@ -2,13 +2,17 @@
 //!
 //! It is used when Zebra is a long way behind the current chain tip.
 
-use std::{cmp::max, collections::HashSet, pin::Pin, task::Poll, time::Duration};
+use std::{cmp::max, collections::HashSet, convert, pin::Pin, task::Poll, time::Duration};
 
 use color_eyre::eyre::{eyre, Report};
 use futures::stream::{FuturesUnordered, StreamExt};
 use indexmap::IndexSet;
 use serde::{Deserialize, Serialize};
-use tokio::{sync::watch, task::JoinError, time::sleep};
+use tokio::{
+    sync::watch,
+    task::JoinError,
+    time::{sleep, timeout},
+};
 use tower::{
     builder::ServiceBuilder, hedge::Hedge, limit::ConcurrencyLimit, retry::Retry, timeout::Timeout,
     Service, ServiceExt,
@@ -210,10 +214,13 @@ const SYNC_RESTART_DELAY: Duration = Duration::from_secs(67);
 /// If this timeout is removed (or set too low), Zebra will immediately retry
 /// to download and verify the genesis block from its peers. This can cause
 /// a denial of service on those peers.
-const GENESIS_TIMEOUT_RETRY: Duration = Duration::from_secs(5);
+///
+/// If this timeout is too short, old or buggy nodes will keep making useless
+/// network requests. If there are a lot of them, it could overwhelm the network.
+const GENESIS_TIMEOUT_RETRY: Duration = Duration::from_secs(10);
 
 /// Sync configuration section.
-#[derive(Clone, Debug, Deserialize, Serialize)]
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
 #[serde(deny_unknown_fields, default)]
 pub struct Config {
     /// The number of parallel block download requests.
@@ -254,7 +261,8 @@ pub struct Config {
     /// The number of threads used to verify signatures, proofs, and other CPU-intensive code.
     ///
-    /// Set to `0` by default, which uses one thread per available CPU core.
+    /// If the number of threads is not configured or zero, Zebra uses the number of logical cores.
+    /// If the number of logical cores can't be detected, Zebra uses one thread.
     /// For details, see [the `rayon` documentation](https://docs.rs/rayon/latest/rayon/struct.ThreadPoolBuilder.html#method.num_threads).
     pub parallel_cpu_threads: usize,
 }
@@ -541,7 +549,8 @@ where
     /// following a fork. Either way, Zebra should attempt to obtain some more tips.
     ///
     /// Returns `Err` if there was an unrecoverable error and restarting the synchronization is
-    /// necessary.
+    /// necessary. This includes outer timeouts, where an entire syncing step takes an extremely
+    /// long time. (These usually indicate hangs.)
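+    ///
+    /// The outer timeouts are `SYNC_RESTART_DELAY` for obtaining tips and
+    /// `BLOCK_VERIFY_TIMEOUT` for each individual sync step.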
 #[instrument(skip(self))]
     async fn try_to_sync(&mut self) -> Result<(), Report> {
         self.prospective_tips = HashSet::new();
 
         info!(
             state_tip = ?self.latest_chain_tip.best_tip_height(),
             "starting sync, obtaining new tips"
         );
-        let mut extra_hashes = self.obtain_tips().await.map_err(|e| {
-            info!("temporary error obtaining tips: {:#}", e);
-            e
-        })?;
+        let mut extra_hashes = timeout(SYNC_RESTART_DELAY, self.obtain_tips())
+            .await
+            .map_err(Into::into)
+            // TODO: replace with flatten() when it stabilises (#70142)
+            .and_then(convert::identity)
+            .map_err(|e| {
+                info!("temporary error obtaining tips: {:#}", e);
+                e
+            })?;
         self.update_metrics();
 
         while !self.prospective_tips.is_empty() || !extra_hashes.is_empty() {
-            // Check whether any block tasks are currently ready:
-            while let Poll::Ready(Some(rsp)) = futures::poll!(self.downloads.next()) {
-                self.handle_block_response(rsp)?;
-            }
-            self.update_metrics();
+            // Avoid hangs due to service readiness or other internal operations.
+            extra_hashes = timeout(BLOCK_VERIFY_TIMEOUT, self.try_to_sync_once(extra_hashes))
+                .await
+                .map_err(Into::into)
+                // TODO: replace with flatten() when it stabilises (#70142)
+                .and_then(convert::identity)?;
+        }
 
-            // Pause new downloads while the syncer or downloader are past their lookahead limits.
-            //
-            // To avoid a deadlock or long waits for blocks to expire, we ignore the download
-            // lookahead limit when there are only a small number of blocks waiting.
-            while self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len())
-                || (self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len()) / 2
-                    && self.past_lookahead_limit_receiver.cloned_watch_data())
-            {
-                trace!(
-                    tips.len = self.prospective_tips.len(),
-                    in_flight = self.downloads.in_flight(),
-                    extra_hashes = extra_hashes.len(),
-                    lookahead_limit = self.lookahead_limit(extra_hashes.len()),
-                    state_tip = ?self.latest_chain_tip.best_tip_height(),
-                    "waiting for pending blocks",
-                );
+        info!("exhausted prospective tip set");
 
-                let response = self.downloads.next().await.expect("downloads is nonempty");
+        Ok(())
+    }
 
-                self.handle_block_response(response)?;
-                self.update_metrics();
-            }
+    /// Tries to synchronize the chain once, using the existing `extra_hashes`.
+    ///
+    /// Tries to extend the existing tips and download the missing blocks.
+    ///
+    /// Returns `Ok(extra_hashes)` if it was able to extend once and synchronize some of the chain.
+    /// Returns `Err` if there was an unrecoverable error and restarting the synchronization is
+    /// necessary.
+    #[instrument(skip(self))]
+    async fn try_to_sync_once(
+        &mut self,
+        mut extra_hashes: IndexSet<block::Hash>,
+    ) -> Result<IndexSet<block::Hash>, Report> {
+        // Check whether any block tasks are currently ready.
+        while let Poll::Ready(Some(rsp)) = futures::poll!(self.downloads.next()) {
+            // Some temporary errors are ignored, and syncing continues with other blocks.
+            // If it turns out they were actually important, syncing will run out of blocks, and
+            // the syncer will reset itself.
+            self.handle_block_response(rsp)?;
+        }
+        self.update_metrics();
 
-            // Once we're below the lookahead limit, we can request more blocks or hashes.
- if !extra_hashes.is_empty() { - debug!( - tips.len = self.prospective_tips.len(), - in_flight = self.downloads.in_flight(), - extra_hashes = extra_hashes.len(), - lookahead_limit = self.lookahead_limit(extra_hashes.len()), - state_tip = ?self.latest_chain_tip.best_tip_height(), - "requesting more blocks", - ); + // Pause new downloads while the syncer or downloader are past their lookahead limits. + // + // To avoid a deadlock or long waits for blocks to expire, we ignore the download + // lookahead limit when there are only a small number of blocks waiting. + while self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len()) + || (self.downloads.in_flight() >= self.lookahead_limit(extra_hashes.len()) / 2 + && self.past_lookahead_limit_receiver.cloned_watch_data()) + { + trace!( + tips.len = self.prospective_tips.len(), + in_flight = self.downloads.in_flight(), + extra_hashes = extra_hashes.len(), + lookahead_limit = self.lookahead_limit(extra_hashes.len()), + state_tip = ?self.latest_chain_tip.best_tip_height(), + "waiting for pending blocks", + ); - let response = self.request_blocks(extra_hashes).await; - extra_hashes = Self::handle_hash_response(response)?; - } else { - info!( - tips.len = self.prospective_tips.len(), - in_flight = self.downloads.in_flight(), - extra_hashes = extra_hashes.len(), - lookahead_limit = self.lookahead_limit(extra_hashes.len()), - state_tip = ?self.latest_chain_tip.best_tip_height(), - "extending tips", - ); + let response = self.downloads.next().await.expect("downloads is nonempty"); - extra_hashes = self.extend_tips().await.map_err(|e| { - info!("temporary error extending tips: {:#}", e); - e - })?; - } + self.handle_block_response(response)?; self.update_metrics(); } - info!("exhausted prospective tip set"); + // Once we're below the lookahead limit, we can request more blocks or hashes. + if !extra_hashes.is_empty() { + debug!( + tips.len = self.prospective_tips.len(), + in_flight = self.downloads.in_flight(), + extra_hashes = extra_hashes.len(), + lookahead_limit = self.lookahead_limit(extra_hashes.len()), + state_tip = ?self.latest_chain_tip.best_tip_height(), + "requesting more blocks", + ); - Ok(()) + let response = self.request_blocks(extra_hashes).await; + extra_hashes = Self::handle_hash_response(response)?; + } else { + info!( + tips.len = self.prospective_tips.len(), + in_flight = self.downloads.in_flight(), + extra_hashes = extra_hashes.len(), + lookahead_limit = self.lookahead_limit(extra_hashes.len()), + state_tip = ?self.latest_chain_tip.best_tip_height(), + "extending tips", + ); + + extra_hashes = self.extend_tips().await.map_err(|e| { + info!("temporary error extending tips: {:#}", e); + e + })?; + } + self.update_metrics(); + + Ok(extra_hashes) } /// Given a block_locator list fan out request for subsequent hashes to @@ -751,7 +790,8 @@ where let new_download_len = download_set.len(); let new_hashes = new_download_len - prev_download_len; debug!(new_hashes, "added hashes to download set"); - metrics::histogram!("sync.obtain.response.hash.count", new_hashes as f64); + metrics::histogram!("sync.obtain.response.hash.count") + .record(new_hashes as f64); } Ok(_) => unreachable!("network returned wrong response"), // We ignore this error because we made multiple fanout requests. 
@@ -771,7 +811,7 @@ where
 
         let new_downloads = download_set.len();
         debug!(new_downloads, "queueing new downloads");
-        metrics::gauge!("sync.obtain.queued.hash.count", new_downloads as f64);
+        metrics::gauge!("sync.obtain.queued.hash.count").set(new_downloads as f64);
 
         // security: use the actual number of new downloads from all peers,
         // so the last peer to respond can't toggle our mempool
@@ -897,7 +937,8 @@ where
                     let new_download_len = download_set.len();
                     let new_hashes = new_download_len - prev_download_len;
                     debug!(new_hashes, "added hashes to download set");
-                    metrics::histogram!("sync.extend.response.hash.count", new_hashes as f64);
+                    metrics::histogram!("sync.extend.response.hash.count")
+                        .record(new_hashes as f64);
                 }
                 Ok(_) => unreachable!("network returned wrong response"),
                 // We ignore this error because we made multiple fanout requests.
@@ -908,7 +949,7 @@ where
 
         let new_downloads = download_set.len();
         debug!(new_downloads, "queueing new downloads");
-        metrics::gauge!("sync.extend.queued.hash.count", new_downloads as f64);
+        metrics::gauge!("sync.extend.queued.hash.count").set(new_downloads as f64);
 
         // security: use the actual number of new downloads from all peers,
         // so the last peer to respond can't toggle our mempool
@@ -932,16 +973,19 @@ where
         while !self.state_contains(self.genesis_hash).await? {
             info!("starting genesis block download and verify");
 
-            let response = self.downloads.download_and_verify(self.genesis_hash).await;
-            Self::handle_response(response).map_err(|e| eyre!(e))?;
-
-            let response = self.downloads.next().await.expect("downloads is nonempty");
+            let response = timeout(SYNC_RESTART_DELAY, self.request_genesis_once())
+                .await
+                .map_err(Into::into);
 
+            // 3 layers of results is not ideal, but we need the timeout on the outside.
             match response {
-                Ok(response) => self
+                Ok(Ok(Ok(response))) => self
                     .handle_block_response(Ok(response))
                     .expect("never returns Err for Ok"),
-                Err(error) => {
+                // Handle fatal errors
+                Ok(Err(fatal_error)) => Err(fatal_error)?,
+                // Handle timeouts and block errors
+                Err(error) | Ok(Ok(Err(error))) => {
                     // TODO: exit syncer on permanent service errors (NetworkError, VerifierError)
                     if Self::should_restart_sync(&error) {
                         warn!(
@@ -963,6 +1007,20 @@ where
         Ok(())
     }
 
+    /// Try to download and verify the genesis block once.
+    ///
+    /// Fatal errors are returned in the outer result, temporary errors in the inner one.
+    async fn request_genesis_once(
+        &mut self,
+    ) -> Result<Result<(Height, block::Hash), BlockDownloadVerifyError>, Report> {
+        let response = self.downloads.download_and_verify(self.genesis_hash).await;
+        Self::handle_response(response).map_err(|e| eyre!(e))?;
+
+        let response = self.downloads.next().await.expect("downloads is nonempty");
+
+        Ok(response)
+    }
+
     /// Queue download and verify tasks for each block that isn't currently known to our node.
     ///
     /// TODO: turn obtain and extend tips into a separate task, which sends hashes via a channel?
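The genesis retry above ends up with three nested `Result` layers: the outer `timeout()`, the fatal `Report` layer, and the retryable block-error layer, as the `// 3 layers of results is not ideal` comment notes. A small sketch of why each layer is matched separately (the error types here are illustrative stand-ins, not Zebra's):

```rust
use tokio::time::{timeout, Duration};

#[derive(Debug)]
struct FatalError; // stand-in for `Report`
#[derive(Debug)]
struct BlockError; // stand-in for `BlockDownloadVerifyError`

// Stand-in for `request_genesis_once()`: fatal errors go in the outer
// result, temporary (retryable) errors in the inner one.
async fn request_once() -> Result<Result<&'static str, BlockError>, FatalError> {
    Ok(Ok("genesis block"))
}

#[tokio::main]
async fn main() {
    // Wrapping the call in `timeout()` adds the third layer.
    let response = timeout(Duration::from_secs(10), request_once()).await;

    match response {
        // Success: unwrap all three layers and handle the block.
        Ok(Ok(Ok(block))) => println!("verified: {block}"),
        // Fatal errors propagate and stop the syncer.
        Ok(Err(fatal_error)) => eprintln!("fatal: {fatal_error:?}"),
        // Timeouts and temporary block errors are logged and retried.
        Err(_elapsed) => eprintln!("timed out: retrying after a delay"),
        Ok(Ok(Err(block_error))) => eprintln!("retryable: {block_error:?}"),
    }
}
```

The real code collapses the last two arms with an or-pattern by first converting the timeout into a `BlockDownloadVerifyError`, using the `From<tokio::time::error::Elapsed>` impl added in `sync/downloads.rs` below.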
@@ -1094,14 +1152,8 @@ where
     }
 
     fn update_metrics(&mut self) {
-        metrics::gauge!(
-            "sync.prospective_tips.len",
-            self.prospective_tips.len() as f64,
-        );
-        metrics::gauge!(
-            "sync.downloads.in_flight",
-            self.downloads.in_flight() as f64,
-        );
+        metrics::gauge!("sync.prospective_tips.len",).set(self.prospective_tips.len() as f64);
+        metrics::gauge!("sync.downloads.in_flight",).set(self.downloads.in_flight() as f64);
     }
 
     /// Return if the sync should be restarted based on the given error
diff --git a/zebrad/src/components/sync/downloads.rs b/zebrad/src/components/sync/downloads.rs
index f43d8005280..67f6ee4844f 100644
--- a/zebrad/src/components/sync/downloads.rs
+++ b/zebrad/src/components/sync/downloads.rs
@@ -4,7 +4,7 @@ use std::{
     collections::HashMap,
     convert::{self, TryFrom},
     pin::Pin,
-    sync::{Arc, TryLockError},
+    sync::Arc,
     task::{Context, Poll},
 };
 
@@ -154,6 +154,17 @@ pub enum BlockDownloadVerifyError {
         height: block::Height,
         hash: block::Hash,
     },
+
+    #[error(
+        "timeout during service readiness, download, verification, or internal downloader operation"
+    )]
+    Timeout,
+}
+
+impl From<tokio::time::error::Elapsed> for BlockDownloadVerifyError {
+    fn from(_value: tokio::time::error::Elapsed) -> Self {
+        BlockDownloadVerifyError::Timeout
+    }
 }
 
 /// Represents a [`Stream`] of download and verification tasks during chain sync.
@@ -319,7 +330,7 @@ where
         hash: block::Hash,
     ) -> Result<(), BlockDownloadVerifyError> {
         if self.cancel_handles.contains_key(&hash) {
-            metrics::counter!("sync.already.queued.dropped.block.hash.count", 1);
+            metrics::counter!("sync.already.queued.dropped.block.hash.count").increment(1);
             return Err(BlockDownloadVerifyError::DuplicateBlockQueuedForDownload { hash });
         }
 
@@ -357,7 +368,7 @@ where
                 biased;
                 _ = &mut cancel_rx => {
                     trace!("task cancelled prior to download completion");
-                    metrics::counter!("sync.cancelled.download.count", 1);
+                    metrics::counter!("sync.cancelled.download.count").increment(1);
                    return Err(BlockDownloadVerifyError::CancelledDuringDownload { hash })
                }
                rsp = block_req => rsp.map_err(|error| BlockDownloadVerifyError::DownloadFailed { error, hash})?,
@@ -378,7 +389,7 @@ where
             } else {
                 unreachable!("wrong response to block request");
             };
-            metrics::counter!("sync.downloaded.block.count", 1);
+            metrics::counter!("sync.downloaded.block.count").increment(1);
 
             // Security & Performance: reject blocks that are too far ahead of our tip.
             // Avoids denial of service attacks, and reduces wasted work on high blocks
@@ -429,7 +440,7 @@ where
                     ?hash,
                     "synced block with no height: dropped downloaded block"
                 );
-                metrics::counter!("sync.no.height.dropped.block.count", 1);
+                metrics::counter!("sync.no.height.dropped.block.count").increment(1);
 
                 return Err(BlockDownloadVerifyError::InvalidHeight { hash });
             };
@@ -469,21 +480,16 @@ where
                     );
                 }
 
-                metrics::counter!("sync.max.height.limit.paused.count", 1);
+                metrics::counter!("sync.max.height.limit.paused.count").increment(1);
             } else if block_height <= lookahead_reset_height && past_lookahead_limit_receiver.cloned_watch_data() {
-                // Try to reset the watched value to false, since we're well under the limit.
-                match past_lookahead_limit_sender.try_lock() {
-                    Ok(watch_sender_guard) => {
-                        // If Zebra is shutting down, ignore the send error.
- let _ = watch_sender_guard.send(true); - metrics::counter!("sync.max.height.limit.reset.count", 1); - }, - Err(TryLockError::Poisoned(_)) => panic!("thread panicked while holding the past_lookahead_limit_sender mutex guard"), - // We'll try allowing new downloads when we get the next block - Err(TryLockError::WouldBlock) => {} - } + // Reset the watched value to false, since we're well under the limit. + // We need to block here, because if we don't the syncer can hang. + + // But if Zebra is shutting down, ignore the send error. + let _ = past_lookahead_limit_sender.lock().expect("thread panicked while holding the past_lookahead_limit_sender mutex guard").send(false); + metrics::counter!("sync.max.height.limit.reset.count").increment(1); - metrics::counter!("sync.max.height.limit.reset.attempt.count", 1); + metrics::counter!("sync.max.height.limit.reset.attempt.count").increment(1); } if block_height < min_accepted_height { @@ -495,7 +501,7 @@ where behind_tip_limit = ?zs::MAX_BLOCK_REORG_HEIGHT, "synced block height behind the finalized tip: dropped downloaded block" ); - metrics::counter!("gossip.min.height.limit.dropped.block.count", 1); + metrics::counter!("gossip.min.height.limit.dropped.block.count").increment(1); Err(BlockDownloadVerifyError::BehindTipHeightLimit { height: block_height, hash })?; } @@ -507,7 +513,7 @@ where biased; _ = &mut cancel_rx => { trace!("task cancelled waiting for verifier service readiness"); - metrics::counter!("sync.cancelled.verify.ready.count", 1); + metrics::counter!("sync.cancelled.verify.ready.count").increment(1); return Err(BlockDownloadVerifyError::CancelledAwaitingVerifierReadiness { height: block_height, hash }) } verifier = readiness => verifier, @@ -530,14 +536,14 @@ where biased; _ = &mut cancel_rx => { trace!("task cancelled prior to verification"); - metrics::counter!("sync.cancelled.verify.count", 1); + metrics::counter!("sync.cancelled.verify.count").increment(1); return Err(BlockDownloadVerifyError::CancelledDuringVerification { height: block_height, hash }) } verification = rsp => verification, }; if verification.is_ok() { - metrics::counter!("sync.verified.block.count", 1); + metrics::counter!("sync.verified.block.count").increment(1); } verification @@ -571,14 +577,26 @@ where pub fn cancel_all(&mut self) { // Replace the pending task list with an empty one and drop it. let _ = std::mem::take(&mut self.pending); + // Signal cancellation to all running tasks. // Since we already dropped the JoinHandles above, they should // fail silently. for (_hash, cancel) in self.cancel_handles.drain() { let _ = cancel.send(()); } + assert!(self.pending.is_empty()); assert!(self.cancel_handles.is_empty()); + + // Set the lookahead limit to false, since we're empty (so we're under the limit). + // + // It is ok to block here, because we're doing a reset and sleep anyway. + // But if Zebra is shutting down, ignore the send error. + let _ = self + .past_lookahead_limit_sender + .lock() + .expect("thread panicked while holding the past_lookahead_limit_sender mutex guard") + .send(false); } /// Get the number of currently in-flight download and verify tasks. diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index f7ca757d2b3..6572cfe459a 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. 
-pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_264_000;
+pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_413_000;
 
 /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run
 /// without halting.
diff --git a/zebrad/src/components/sync/tests/timing.rs b/zebrad/src/components/sync/tests/timing.rs
index 006c08c4e51..61d5bac60ea 100644
--- a/zebrad/src/components/sync/tests/timing.rs
+++ b/zebrad/src/components/sync/tests/timing.rs
@@ -1,11 +1,8 @@
 //! Check the relationship between various sync timeouts and delays.
 
-use std::{
-    convert::TryInto,
-    sync::{
-        atomic::{AtomicU8, Ordering},
-        Arc,
-    },
+use std::sync::{
+    atomic::{AtomicU8, Ordering},
+    Arc,
 };
 
 use futures::future;
@@ -79,10 +76,7 @@ fn ensure_timeouts_consistent() {
     );
 
     assert!(
-        SYNC_RESTART_DELAY.as_secs()
-            < POST_BLOSSOM_POW_TARGET_SPACING
-                .try_into()
-                .expect("not negative"),
+        SYNC_RESTART_DELAY.as_secs() < POST_BLOSSOM_POW_TARGET_SPACING.into(),
         "a syncer tip crawl should complete before most new blocks"
     );
 
diff --git a/zebrad/src/config.rs b/zebrad/src/config.rs
index 9a8f9ce1ca8..c0b22479c36 100644
--- a/zebrad/src/config.rs
+++ b/zebrad/src/config.rs
@@ -11,20 +11,33 @@ use serde::{Deserialize, Serialize};
 /// The `zebrad` config is a TOML-encoded version of this structure. The meaning
 /// of each field is described in the documentation, although it may be necessary
 /// to click through to the sub-structures for each section.
-#[derive(Clone, Default, Debug, Deserialize, Serialize)]
+///
+/// The path to the configuration file can also be specified with the `--config` flag when running Zebra.
+///
+/// The default path to the `zebrad` config is platform dependent, based on
+/// [`dirs::preference_dir`](https://docs.rs/dirs/latest/dirs/fn.preference_dir.html):
+///
+/// | Platform | Value                                 | Example                                        |
+/// | -------- | ------------------------------------- | ---------------------------------------------- |
+/// | Linux    | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml`              |
+/// | macOS    | `$HOME/Library/Preferences`           | `/Users/Alice/Library/Preferences/zebrad.toml` |
+/// | Windows  | `{FOLDERID_RoamingAppData}`           | `C:\Users\Alice\AppData\Roaming\zebrad.toml`   |
+#[derive(Clone, Default, Debug, Eq, PartialEq, Deserialize, Serialize)]
 #[serde(deny_unknown_fields, default)]
 pub struct ZebradConfig {
     /// Consensus configuration
-    pub consensus: zebra_consensus::Config,
+    //
+    // These configs use full paths to avoid a rustdoc link bug (#7048).
+    pub consensus: zebra_consensus::config::Config,
 
     /// Metrics configuration
     pub metrics: crate::components::metrics::Config,
 
     /// Networking configuration
-    pub network: zebra_network::Config,
+    pub network: zebra_network::config::Config,
 
     /// State configuration
-    pub state: zebra_state::Config,
+    pub state: zebra_state::config::Config,
 
     /// Tracing configuration
     pub tracing: crate::components::tracing::Config,
@@ -41,4 +54,8 @@ pub struct ZebradConfig {
     #[serde(skip_serializing_if = "zebra_rpc::config::mining::Config::skip_getblocktemplate")]
     /// Mining configuration
     pub mining: zebra_rpc::config::mining::Config,
+
+    #[cfg(feature = "shielded-scan")]
+    /// Scanner configuration
+    pub shielded_scan: zebra_scan::config::Config,
 }
diff --git a/zebrad/src/lib.rs b/zebrad/src/lib.rs
index b36ca2e182a..66338891559 100644
--- a/zebrad/src/lib.rs
+++ b/zebrad/src/lib.rs
@@ -1,13 +1,7 @@
 //! ![Zebra logotype](https://zfnd.org/wp-content/uploads/2022/03/zebra-logotype.png)
 //!
-//! Zebra is a Zcash node written in Rust.
-//!
-//! 
The `zebrad` binary uses a collection of `zebra-*` crates, -//! which implement the different components of a Zcash node -//! (networking, chain structures, validation, rpc, etc). -//! -//! [Rendered docs from the `main` branch](https://doc.zebra.zfnd.org). -//! [Join us on the Zcash Foundation Engineering Discord](https://discord.gg/na6QZNd). +//! Zebra is a Zcash full node written in Rust. Follow the [introductory +//! page](https://zebra.zfnd.org/index.html#documentation) in the Zebra Book to learn more. //! //! ## About Zcash //! @@ -51,9 +45,21 @@ //! - Additional contexts: wider target deployments for people to use a consensus //! node in more contexts e.g. mobile, wasm, etc. //! +//! ## Configuration +//! +//! The command below places the generated `zebrad.toml` config file in the default preferences directory on Linux: +//! +//! ```console +//! zebrad generate -o ~/.config/zebrad.toml +//! ``` +//! +//! See [`config::ZebradConfig`] for the default locations on other operating systems and for more information about how to configure Zebra. +//! //! ## Zebra Feature Flags //! -//! The following `zebrad` feature flags are available at compile time: +//! The following [Cargo +//! features](https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options) +//! are available at compile time: //! //! ### JSON-RPC //! @@ -98,24 +104,42 @@ //! ### Experimental //! //! * `elasticsearch`: save block data into elasticsearch database. Read the [elasticsearch](https://zebra.zfnd.org/user/elasticsearch.html) -//! section of the book for more details. +//! section of the book for more details. +//! * `shielded-scan`: enable experimental support for scanning shielded transactions. Read the [shielded-scan](https://zebra.zfnd.org/user/shielded-scan.html) +//! section of the book for more details. +//! * `internal-miner`: enable experimental support for mining inside Zebra, without an external +//! mining pool. This feature is only supported on testnet. Use a GPU or ASIC on mainnet for +//! efficient mining. +//! +//! ## Zebra crates +//! +//! [The Zebra monorepo](https://github.com/ZcashFoundation/zebra) is a collection of the following +//! crates: +//! +//! - [tower-batch-control](https://docs.rs/tower-batch-control/latest/tower_batch_control/) +//! - [tower-fallback](https://docs.rs/tower-fallback/latest/tower_fallback/) +//! - [zebra-chain](https://docs.rs/zebra-chain/latest/zebra_chain/) +//! - [zebra-consensus](https://docs.rs/zebra-consensus/latest/zebra_consensus/) +//! - [zebra-network](https://docs.rs/zebra-network/latest/zebra_network/) +//! - [zebra-node-services](https://docs.rs/zebra-node-services/latest/zebra_node_services/) +//! - [zebra-rpc](https://docs.rs/zebra-rpc/latest/zebra_rpc/) +//! - [zebra-scan](https://docs.rs/zebra-scan/latest/zebra_scan/) +//! - [zebra-script](https://docs.rs/zebra-script/latest/zebra_script/) +//! - [zebra-state](https://docs.rs/zebra-state/latest/zebra_state/) +//! - [zebra-test](https://docs.rs/zebra-test/latest/zebra_test/) +//! - [zebra-utils](https://docs.rs/zebra-utils/latest/zebra_utils/) +//! - [zebrad](https://docs.rs/zebrad/latest/zebrad/) +//! +//! The links in the list above point to the documentation of the public APIs of the crates. For +//! the documentation of the internal APIs, see the rendered internal documentation, which lists +//! all Zebra crates in the left sidebar.
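The new `## Configuration` section above, together with the expanded `ZebradConfig` docs, explains where the generated TOML file lives. As an illustration of the `Deserialize` derive those docs rely on, here is a minimal sketch of reading a generated file back into the struct; it assumes the `toml` crate as a dependency and is not necessarily how `zebrad` itself loads its config:

```rust
use std::path::Path;

use zebrad::config::ZebradConfig;

fn load_config(path: &Path) -> Result<ZebradConfig, Box<dyn std::error::Error>> {
    // `ZebradConfig` derives `Deserialize` and uses `deny_unknown_fields`,
    // so unknown TOML keys are rejected instead of being silently ignored.
    let text = std::fs::read_to_string(path)?;
    let config: ZebradConfig = toml::from_str(&text)?;
    Ok(config)
}
```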
#![doc(html_favicon_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-favicon-128.png")] #![doc(html_logo_url = "https://zfnd.org/wp-content/uploads/2022/03/zebra-icon.png")] -#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebrad")] +#![doc(html_root_url = "https://docs.rs/zebrad")] // Tracing causes false positives on this lint: // https://github.com/tokio-rs/tracing/issues/553 #![allow(clippy::cognitive_complexity)] -// -// Rust 1.72 has a false positive when nested generics are used inside Arc. -// This makes the `arc_with_non_send_sync` lint trigger on a lot of proptest code. -// -// TODO: remove this allow when Rust 1.73 is stable, because this lint bug is fixed in that release: -// -#![cfg_attr( - any(test, feature = "proptest-impl"), - allow(clippy::arc_with_non_send_sync) -)] #[macro_use] extern crate tracing; diff --git a/zebrad/systemd/zebrad.service b/zebrad/systemd/zebrad.service index 96d3e602338..92f9091fa62 100644 --- a/zebrad/systemd/zebrad.service +++ b/zebrad/systemd/zebrad.service @@ -22,6 +22,8 @@ ExecStart=/usr/bin/zebrad start Restart=always PrivateTmp=true NoNewPrivileges=true +StandardOutput=journal +StandardError=journal [Install] Alias=zebrad diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index c51bdc98949..0aad78c4eb7 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -109,17 +109,31 @@ //! Example of how to run the get_block_template test: //! //! ```console -//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test get_block_template --features getblocktemplate-rpcs --release -- --ignored --nocapture +//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test get_block_template --features getblocktemplate-rpcs --release -- --ignored --nocapture //! ``` //! //! Example of how to run the submit_block test: //! //! ```console -//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test submit_block --features getblocktemplate-rpcs --release -- --ignored --nocapture +//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test submit_block --features getblocktemplate-rpcs --release -- --ignored --nocapture //! ``` //! //! Please refer to the documentation of each test for more information. //! +//! ## Shielded scanning tests +//! +//! Example of how to run the scans_for_new_key test: +//! +//! ```console +//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test scans_for_new_key --features shielded-scan --release -- --ignored --nocapture +//! ``` +//! +//! Example of how to run the scan_subscribe_results test: +//! +//! ```console +//! ZEBRA_CACHED_STATE_DIR=/path/to/zebra/state cargo test scan_subscribe_results --features shielded-scan -- --ignored --nocapture +//! ``` +//! //! ## Checkpoint Generation Tests //! //! Generate checkpoints on mainnet and testnet using a cached state: @@ -159,7 +173,7 @@ use zebra_chain::{ }; use zebra_network::constants::PORT_IN_USE_ERROR; use zebra_node_services::rpc_client::RpcRequestClient; -use zebra_state::{constants::LOCK_FILE_ERROR, database_format_version_in_code}; +use zebra_state::{constants::LOCK_FILE_ERROR, state_database_format_version_in_code}; use zebra_test::{ args, @@ -204,6 +218,9 @@ pub const MAX_ASYNC_BLOCKING_TIME: Duration = zebra_test::mock_service::DEFAULT_ /// The test config file prefix for `--feature getblocktemplate-rpcs` configs. pub const GET_BLOCK_TEMPLATE_CONFIG_PREFIX: &str = "getblocktemplate-"; +/// The test config file prefix for `--feature shielded-scan` configs. 
+pub const SHIELDED_SCAN_CONFIG_PREFIX: &str = "shieldedscan-"; + #[test] fn generate_no_args() -> Result<()> { let _init_guard = zebra_test::init(); @@ -806,18 +823,18 @@ fn last_config_is_stored() -> Result<()> { zebrad generate | \n\ sed 's/cache_dir = \".*\"/cache_dir = \"cache_dir\"/' > \n\ zebrad/tests/common/configs/{}.toml", - if cfg!(feature = "getblocktemplate-rpcs") { - GET_BLOCK_TEMPLATE_CONFIG_PREFIX + if cfg!(feature = "shielded-scan") { + SHIELDED_SCAN_CONFIG_PREFIX } else { "" }, - if cfg!(feature = "getblocktemplate-rpcs") { - "--features=getblocktemplate-rpcs " + if cfg!(feature = "shielded-scan") { + "--features=shielded-scan " } else { "" }, - if cfg!(feature = "getblocktemplate-rpcs") { - GET_BLOCK_TEMPLATE_CONFIG_PREFIX + if cfg!(feature = "shielded-scan") { + SHIELDED_SCAN_CONFIG_PREFIX } else { "" }, @@ -946,6 +963,14 @@ fn stored_configs_work() -> Result<()> { continue; } + // ignore files starting with shieldedscan prefix + // if we were not built with the shielded-scan feature. + #[cfg(not(feature = "shielded-scan"))] + if config_file_name.starts_with(SHIELDED_SCAN_CONFIG_PREFIX) { + tracing::info!(?config_file_path, "skipping shielded-scan config file path"); + continue; + } + let run_dir = testdir()?; let stored_config_path = config_file_full_path(config_file.path()); @@ -1167,8 +1192,6 @@ fn create_cached_database(network: Network) -> Result<()> { create_cached_database_height( network, height, - // We don't need the ZK parameters, we're only using checkpoints - true, // Use checkpoints to increase sync performance while caching the database true, // Check that we're still using checkpoints when we finish the cached sync @@ -1185,8 +1208,6 @@ fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> { create_cached_database_height( network, height.unwrap(), - // We need the ZK parameters for full validation - false, // Test full validation by turning checkpoints off false, // Check that we're doing full validation when we finish the cached sync @@ -1216,8 +1237,6 @@ fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> { network, // Just keep going until we reach the chain tip block::Height::MAX, - // We need the ZK parameters for full validation - false, // Use the checkpoints to sync quickly, then do full validation until the chain tip true, // Finish when we reach the chain tip @@ -1672,13 +1691,11 @@ fn non_blocking_logger() -> Result<()> { Ok(()) }); - // Wait until the spawned task finishes or return an error in 45 seconds - if done_rx.recv_timeout(Duration::from_secs(45)).is_err() { - return Err(eyre!("unexpected test task hang")); + // Wait until the spawned task finishes up to 45 seconds before shutting down tokio runtime + if done_rx.recv_timeout(Duration::from_secs(45)).is_ok() { + rt.shutdown_timeout(Duration::from_secs(3)); } - rt.shutdown_timeout(Duration::from_secs(3)); - match test_task_handle.now_or_never() { Some(Ok(result)) => result, Some(Err(error)) => Err(eyre!("join error: {:?}", error)), @@ -1862,7 +1879,7 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, &state_version_message, - database_format_version_in_code(), + state_database_format_version_in_code(), [format!( "Opened RPC endpoint at {}", zebra_rpc_address.expect("lightwalletd test must have RPC port") @@ -1872,7 +1889,7 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, &state_version_message, - 
database_format_version_in_code(), + state_database_format_version_in_code(), None, )?; } @@ -1984,7 +2001,7 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, &state_version_message, - database_format_version_in_code(), + state_database_format_version_in_code(), None, )?; } @@ -2010,7 +2027,7 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, &state_version_message, - database_format_version_in_code(), + state_database_format_version_in_code(), None, )?; } @@ -2198,7 +2215,7 @@ fn zebra_state_conflict() -> Result<()> { dir_conflict_full.push("state"); dir_conflict_full.push(format!( "v{}", - zebra_state::database_format_version_in_code().major, + zebra_state::state_database_format_version_in_code().major, )); dir_conflict_full.push(config.network.network.to_string().to_lowercase()); format!( @@ -2337,8 +2354,8 @@ async fn fully_synced_rpc_test() -> Result<()> { Ok(()) } -#[tokio::test] -async fn delete_old_databases() -> Result<()> { +#[test] +fn delete_old_databases() -> Result<()> { use std::fs::{canonicalize, create_dir}; let _init_guard = zebra_test::init(); @@ -2385,7 +2402,7 @@ async fn delete_old_databases() -> Result<()> { // inside dir was deleted child.expect_stdout_line_matches(format!( - "deleted outdated state directory deleted_state={canonicalized_inside_dir:?}" + "deleted outdated state database directory.*deleted_db.*=.*{canonicalized_inside_dir:?}" ))?; assert!(!inside_dir.as_path().exists()); @@ -2532,7 +2549,7 @@ async fn new_state_format() -> Result<()> { /// (or just add a delay during tests) #[tokio::test] async fn update_state_format() -> Result<()> { - let mut fake_version = database_format_version_in_code(); + let mut fake_version = state_database_format_version_in_code(); fake_version.minor = 0; fake_version.patch = 0; @@ -2549,7 +2566,7 @@ async fn update_state_format() -> Result<()> { /// Future version compatibility is a best-effort attempt, this test can be disabled if it fails. #[tokio::test] async fn downgrade_state_format() -> Result<()> { - let mut fake_version = database_format_version_in_code(); + let mut fake_version = state_database_format_version_in_code(); fake_version.minor = u16::MAX.into(); fake_version.patch = 0; @@ -2631,13 +2648,17 @@ async fn state_format_test( .zebrad_config(test_name, false, Some(dir.path()), network) .expect("already checked config")?; - zebra_state::write_database_format_version_to_disk(fake_version, &config.state, network) - .expect("can't write fake database version to disk"); + zebra_state::write_state_database_format_version_to_disk( + &config.state, + fake_version, + network, + ) + .expect("can't write fake database version to disk"); // Give zebra_state enough time to actually write the database version to disk. 
tokio::time::sleep(Duration::from_secs(1)).await; - let running_version = database_format_version_in_code(); + let running_version = state_database_format_version_in_code(); match fake_version.cmp(&running_version) { Ordering::Less => expect_older_version = true, @@ -2743,7 +2764,7 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, &state_version_message, - database_format_version_in_code(), + state_database_format_version_in_code(), None, )?; @@ -2805,3 +2826,211 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> { Ok(()) } + +/// Test that the scanner task gets started when the node starts. +#[test] +#[cfg(feature = "shielded-scan")] +fn scan_task_starts() -> Result<()> { + use indexmap::IndexMap; + use zebra_scan::tests::ZECPAGES_SAPLING_VIEWING_KEY; + + let _init_guard = zebra_test::init(); + + let test_type = TestType::LaunchWithEmptyState { + launches_lightwalletd: false, + }; + let mut config = default_test_config(Mainnet)?; + let mut keys = IndexMap::new(); + keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); + config.shielded_scan.sapling_keys_to_scan = keys; + + // Start zebra with the config. + let mut zebrad = testdir()? + .with_exact_config(&config)? + .spawn_child(args!["start"])? + .with_timeout(test_type.zebrad_timeout()); + + // Check scanner was started. + zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + + // Look for 2 scanner notices indicating we are below sapling activation. + zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; + zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; + + // Kill the node. + zebrad.kill(false)?; + + // Check that scan task started and the first scanning is done. + let output = zebrad.wait_with_output()?; + + // Make sure the command was killed + output.assert_was_killed()?; + output.assert_failure()?; + + Ok(()) +} + +/// Test that the scanner gRPC server starts when the node starts. +#[tokio::test] +#[cfg(feature = "shielded-scan")] +async fn scan_rpc_server_starts() -> Result<()> { + use zebra_grpc::scanner::{scanner_client::ScannerClient, Empty}; + + let _init_guard = zebra_test::init(); + + let test_type = TestType::LaunchWithEmptyState { + launches_lightwalletd: false, + }; + + let port = random_known_port(); + let listen_addr = format!("127.0.0.1:{port}"); + let mut config = default_test_config(Mainnet)?; + config.shielded_scan.listen_addr = Some(listen_addr.parse()?); + + // Start zebra with the config. + let mut zebrad = testdir()? + .with_exact_config(&config)? + .spawn_child(args!["start"])? + .with_timeout(test_type.zebrad_timeout()); + + // Wait until gRPC server is starting. + tokio::time::sleep(LAUNCH_DELAY).await; + zebrad.expect_stdout_line_matches("starting scan gRPC server")?; + tokio::time::sleep(Duration::from_secs(1)).await; + + let mut client = ScannerClient::connect(format!("http://{listen_addr}")).await?; + + let request = tonic::Request::new(Empty {}); + + client.get_info(request).await?; + + // Kill the node. + zebrad.kill(false)?; + + // Check that scan task started and the first scanning is done. 
+ let output = zebrad.wait_with_output()?; + + // Make sure the command was killed + output.assert_was_killed()?; + output.assert_failure()?; + + Ok(()) +} + +/// Test that the scanner can continue scanning where it was left when zebrad restarts. +/// +/// Needs a cache state close to the tip. A possible way to run it locally is: +/// +/// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +/// cargo test scan_start_where_left --features="shielded-scan" -- --ignored --nocapture +/// +/// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops. +/// Then it will restart zebrad and check that it resumes scanning where it was left. +/// +/// Note: This test will remove all the contents you may have in the ZEBRA_CACHED_STATE_DIR/private-scan directory +/// so it can start with an empty scanning state. +#[ignore] +#[test] +#[cfg(feature = "shielded-scan")] +fn scan_start_where_left() -> Result<()> { + use indexmap::IndexMap; + use zebra_scan::{storage::db::SCANNER_DATABASE_KIND, tests::ZECPAGES_SAPLING_VIEWING_KEY}; + + let _init_guard = zebra_test::init(); + + // use `UpdateZebraCachedStateNoRpc` as the test type to make sure a zebrad cache state is available. + let test_type = TestType::UpdateZebraCachedStateNoRpc; + if let Some(cache_dir) = test_type.zebrad_state_path("scan test") { + // Add a key to the config + let mut config = default_test_config(Mainnet)?; + let mut keys = IndexMap::new(); + keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); + config.shielded_scan.sapling_keys_to_scan = keys; + + // Add the cache dir to shielded scan, make it the same as the zebrad cache state. + config.shielded_scan.db_config_mut().cache_dir = cache_dir.clone(); + config.shielded_scan.db_config_mut().ephemeral = false; + + // Add the cache dir to state. + config.state.cache_dir = cache_dir.clone(); + config.state.ephemeral = false; + + // Remove the scan directory before starting. + let scan_db_path = cache_dir.join(SCANNER_DATABASE_KIND); + fs::remove_dir_all(std::path::Path::new(&scan_db_path)).ok(); + + // Start zebra with the config. + let mut zebrad = testdir()? + .with_exact_config(&config)? + .spawn_child(args!["start"])? + .with_timeout(test_type.zebrad_timeout()); + + // Check scanner was started. + zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + + // The first time + zebrad.expect_stdout_line_matches( + r"Scanning the blockchain for key 0, started at block 419200, now at block 420000", + )?; + + // Make sure scanner scans a few blocks. + zebrad.expect_stdout_line_matches( + r"Scanning the blockchain for key 0, started at block 419200, now at block 430000", + )?; + zebrad.expect_stdout_line_matches( + r"Scanning the blockchain for key 0, started at block 419200, now at block 440000", + )?; + + // Kill the node. + zebrad.kill(false)?; + let output = zebrad.wait_with_output()?; + + // Make sure the command was killed + output.assert_was_killed()?; + output.assert_failure()?; + + // Start the node again. + let mut zebrad = testdir()? + .with_exact_config(&config)? + .spawn_child(args!["start"])? + .with_timeout(test_type.zebrad_timeout()); + + // Resuming message. + zebrad.expect_stdout_line_matches( + "Last scanned height for key number 0 is 439000, resuming at 439001", + )?; + zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + + // Start scanning where it was left. 
+ zebrad.expect_stdout_line_matches( + r"Scanning the blockchain for key 0, started at block 439001, now at block 440000", + )?; + zebrad.expect_stdout_line_matches( + r"Scanning the blockchain for key 0, started at block 439001, now at block 450000", + )?; + } + + Ok(()) +} + +/// Test successful registration of a new key in the scan task. +/// +/// See [`common::shielded_scan::scans_for_new_key`] for more information. +// TODO: Add this test to CI (#8236) +#[tokio::test] +#[ignore] +#[cfg(feature = "shielded-scan")] +async fn scans_for_new_key() -> Result<()> { + common::shielded_scan::scans_for_new_key::run().await +} + +/// Tests SubscribeResults ScanService request. +/// +/// See [`common::shielded_scan::subscribe_results`] for more information. +// TODO: Add this test to CI (#8236) +#[tokio::test] +#[ignore] +#[cfg(feature = "shielded-scan")] +async fn scan_subscribe_results() -> Result<()> { + common::shielded_scan::subscribe_results::run().await +} diff --git a/zebrad/tests/common/checkpoints.rs b/zebrad/tests/common/checkpoints.rs index a1aa1dbb43c..c67130845e4 100644 --- a/zebrad/tests/common/checkpoints.rs +++ b/zebrad/tests/common/checkpoints.rs @@ -20,7 +20,7 @@ use zebra_chain::{ }; use zebra_consensus::MAX_CHECKPOINT_HEIGHT_GAP; use zebra_node_services::rpc_client::RpcRequestClient; -use zebra_state::database_format_version_in_code; +use zebra_state::state_database_format_version_in_code; use zebra_test::{ args, command::{Arguments, TestDirExt, NO_MATCHES_REGEX_ITER}, @@ -98,7 +98,7 @@ pub async fn run(network: Network) -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, &state_version_message, - database_format_version_in_code(), + state_database_format_version_in_code(), None, )?; } @@ -396,8 +396,7 @@ pub fn wait_for_zebra_checkpoints_generation< test_type: TestType, show_zebrad_logs: bool, ) -> Result<(TestChild, TestChild

)> { - let last_checkpoint_gap = HeightDiff::try_from(MIN_TRANSPARENT_COINBASE_MATURITY) - .expect("constant fits in HeightDiff") + let last_checkpoint_gap = HeightDiff::from(MIN_TRANSPARENT_COINBASE_MATURITY) + HeightDiff::try_from(MAX_CHECKPOINT_HEIGHT_GAP).expect("constant fits in HeightDiff"); let expected_final_checkpoint_height = (zebra_tip_height - last_checkpoint_gap).expect("network tip is high enough"); diff --git a/zebrad/tests/common/config.rs b/zebrad/tests/common/config.rs index 3abf83d7eca..a67f1d484c6 100644 --- a/zebrad/tests/common/config.rs +++ b/zebrad/tests/common/config.rs @@ -15,7 +15,7 @@ use std::{ use color_eyre::eyre::Result; use tempfile::TempDir; -use zebra_chain::parameters::Network::{self, *}; +use zebra_chain::parameters::Network; use zebra_test::net::random_known_port; use zebrad::{ components::{mempool, sync, tracing}, @@ -51,10 +51,7 @@ pub fn default_test_config(net: Network) -> Result { ..mempool::Config::default() }; - let consensus = zebra_consensus::Config { - debug_skip_parameter_preload: true, - ..zebra_consensus::Config::default() - }; + let consensus = zebra_consensus::Config::default(); let force_use_color = !matches!( env::var("ZEBRA_FORCE_USE_COLOR"), @@ -73,16 +70,38 @@ pub fn default_test_config(net: Network) -> Result { #[cfg(feature = "getblocktemplate-rpcs")] { - let miner_address = if network.network == Mainnet { - "t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" - } else { + let miner_address = if network.network.is_a_test_network() { + // Assume test networks all use the same address prefix and format "t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" + } else { + "t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" }; mining.miner_address = Some(miner_address.parse().expect("hard-coded address is valid")); } - let config = ZebradConfig { + #[cfg(feature = "shielded-scan")] + { + let mut shielded_scan = zebra_scan::Config::ephemeral(); + shielded_scan.db_config_mut().cache_dir = "zebra-scan".into(); + + let config = ZebradConfig { + network, + state, + sync, + mempool, + consensus, + tracing, + mining, + shielded_scan, + ..ZebradConfig::default() + }; + + Ok(config) + } + + #[cfg(not(feature = "shielded-scan"))] + Ok(ZebradConfig { network, state, sync, @@ -91,9 +110,7 @@ pub fn default_test_config(net: Network) -> Result { tracing, mining, ..ZebradConfig::default() - }; - - Ok(config) + }) } pub fn persistent_test_config(network: Network) -> Result { @@ -137,7 +154,7 @@ pub fn random_known_rpc_port_config( let mut config = default_test_config(network)?; config.rpc.listen_addr = Some(zebra_rpc_listener); if parallel_cpu_threads { - // Auto-configure to the number of CPU cores: most users configre this + // Auto-configure to the number of CPU cores: most users configure this config.rpc.parallel_cpu_threads = 0; } else { // Default config, users who want to detect port conflicts configure this diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml index b64dc7ed15e..1472bd432cd 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.1.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git 
a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml index ba578bd3f00..3397a033e6b 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.2.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml index 379c491bf6a..e5ce9a5cfb3 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.3.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml index 85406147154..f3f1d74bb79 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.4.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml index 87ed3dad81e..a22a6b4e1f0 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.5.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml index c6629087b72..ec7fe819bd0 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml index 3536c80c9c8..5375f74e7b1 100644 --- a/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# 
https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/shieldedscan-v1.5.0.toml b/zebrad/tests/common/configs/shieldedscan-v1.5.0.toml new file mode 100644 index 00000000000..e701e9ff3e8 --- /dev/null +++ b/zebrad/tests/common/configs/shieldedscan-v1.5.0.toml @@ -0,0 +1,80 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[shielded_scan] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[shielded_scan.sapling_keys_to_scan] + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false diff --git a/zebrad/tests/common/configs/shieldedscan-v1.6.0.toml b/zebrad/tests/common/configs/shieldedscan-v1.6.0.toml new file mode 100644 index 00000000000..7cd2eb43f92 --- /dev/null +++ b/zebrad/tests/common/configs/shieldedscan-v1.6.0.toml @@ -0,0 +1,89 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. 
The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. +# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[shielded_scan] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[shielded_scan.sapling_keys_to_scan] + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false \ No newline at end of file diff --git a/zebrad/tests/common/configs/v1.0.0-beta.12.toml b/zebrad/tests/common/configs/v1.0.0-beta.12.toml index 32421cf0bd2..86d8718877c 100644 --- a/zebrad/tests/common/configs/v1.0.0-beta.12.toml +++ b/zebrad/tests/common/configs/v1.0.0-beta.12.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-beta.13.toml b/zebrad/tests/common/configs/v1.0.0-beta.13.toml index 14dc310ac59..9477f2f5deb 100644 --- a/zebrad/tests/common/configs/v1.0.0-beta.13.toml +++ b/zebrad/tests/common/configs/v1.0.0-beta.13.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-beta.15.toml b/zebrad/tests/common/configs/v1.0.0-beta.15.toml index c38cc2530e9..5a10b5bdcbf 100644 --- a/zebrad/tests/common/configs/v1.0.0-beta.15.toml +++ b/zebrad/tests/common/configs/v1.0.0-beta.15.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# 
https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.0.toml b/zebrad/tests/common/configs/v1.0.0-rc.0.toml index 72f8c6bc693..7363d588b70 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.0.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.0.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.2.toml b/zebrad/tests/common/configs/v1.0.0-rc.2.toml index 8622db79f88..3ebf86709f3 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.2.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.2.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.4.toml b/zebrad/tests/common/configs/v1.0.0-rc.4.toml index de63dd90d02..5dfc7d14391 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.4.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.4.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.0-rc.9.toml b/zebrad/tests/common/configs/v1.0.0-rc.9.toml index 52cd503be0b..5a90632ea92 100644 --- a/zebrad/tests/common/configs/v1.0.0-rc.9.toml +++ b/zebrad/tests/common/configs/v1.0.0-rc.9.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.0.1.toml b/zebrad/tests/common/configs/v1.0.1.toml index 02bac53da62..7c94c32c9b2 100644 --- a/zebrad/tests/common/configs/v1.0.1.toml +++ b/zebrad/tests/common/configs/v1.0.1.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.3.0-progress-bars.toml b/zebrad/tests/common/configs/v1.3.0-progress-bars.toml index 82da5039fd8..a5801ba802b 100644 --- a/zebrad/tests/common/configs/v1.3.0-progress-bars.toml +++ b/zebrad/tests/common/configs/v1.3.0-progress-bars.toml @@ -12,7 +12,7 @@ # # The config format (including a complete list of sections and fields) is # documented here: -# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html # # zebrad attempts to load 
configs in the following order: # diff --git a/zebrad/tests/common/configs/v1.4.0.toml b/zebrad/tests/common/configs/v1.4.0.toml new file mode 100644 index 00000000000..26ee79452b5 --- /dev/null +++ b/zebrad/tests/common/configs/v1.4.0.toml @@ -0,0 +1,73 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false \ No newline at end of file diff --git a/zebrad/tests/common/configs/v1.6.0.toml b/zebrad/tests/common/configs/v1.6.0.toml new file mode 100644 index 00000000000..3f19b0830a2 --- /dev/null +++ b/zebrad/tests/common/configs/v1.6.0.toml @@ -0,0 +1,82 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://docs.rs/zebrad/latest/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. 
+# +# The user's preference directory and the default path to the `zebrad` config are platform dependent, +# based on `dirs::preference_dir`, see https://docs.rs/dirs/latest/dirs/fn.preference_dir.html : +# +# | Platform | Value | Example | +# | -------- | ------------------------------------- | ---------------------------------------------- | +# | Linux | `$XDG_CONFIG_HOME` or `$HOME/.config` | `/home/alice/.config/zebrad.toml` | +# | macOS | `$HOME/Library/Preferences` | `/Users/Alice/Library/Preferences/zebrad.toml` | +# | Windows | `{FOLDERID_RoamingAppData}` | `C:\Users\Alice\AppData\Local\zebrad.toml` | + +[consensus] +checkpoint_sync = true + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false \ No newline at end of file diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index e6bd3d3d9c4..56a62ff4346 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -169,7 +169,7 @@ async fn try_validate_block_template(client: &RpcRequestClient) -> Result<()> { { let client = client.clone(); - let mut long_poll_id = response_json_result.long_poll_id.clone(); + let mut long_poll_id = response_json_result.long_poll_id; tokio::spawn(async move { loop { @@ -196,7 +196,7 @@ async fn try_validate_block_template(client: &RpcRequestClient) -> Result<()> { } long_poll_result = long_poll_request => { - long_poll_id = long_poll_result.long_poll_id.clone(); + long_poll_id = long_poll_result.long_poll_id; if let Some(false) = long_poll_result.submit_old { let _ = long_poll_result_tx.send(long_poll_result); diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs index 3030f1c63ba..e341f1a2cd5 100644 --- a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs +++ b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs @@ -43,7 +43,7 @@ use zebra_chain::{ parameters::NetworkUpgrade::{Nu5, Sapling}, serialization::ZcashDeserializeInto, }; -use zebra_state::database_format_version_in_code; +use zebra_state::state_database_format_version_in_code; use crate::common::{ cached_state::{ @@ -122,7 +122,7 @@ pub async fn run() -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, &state_version_message, - database_format_version_in_code(), + state_database_format_version_in_code(), [format!("Opened RPC endpoint at {zebra_rpc_address}")], )?; } @@ -159,7 +159,7 @@ pub async fn run() -> Result<()> { wait_for_state_version_upgrade( &mut zebrad, 
&state_version_message, - database_format_version_in_code(), + state_database_format_version_in_code(), None, )?; } diff --git a/zebrad/tests/common/mod.rs index 77627d0275e..70383124834 100644 --- a/zebrad/tests/common/mod.rs +++ b/zebrad/tests/common/mod.rs @@ -23,3 +23,6 @@ pub mod checkpoints; #[cfg(feature = "getblocktemplate-rpcs")] pub mod get_block_template_rpcs; + +#[cfg(feature = "shielded-scan")] +pub mod shielded_scan; diff --git a/zebrad/tests/common/shielded_scan.rs new file mode 100644 index 00000000000..e3478f180c0 --- /dev/null +++ b/zebrad/tests/common/shielded_scan.rs @@ -0,0 +1,4 @@ +//! Acceptance tests for the `shielded-scan` feature in zebrad. + +pub(crate) mod scans_for_new_key; +pub(crate) mod subscribe_results; diff --git a/zebrad/tests/common/shielded_scan/scans_for_new_key.rs new file mode 100644 index 00000000000..b7e5109c372 --- /dev/null +++ b/zebrad/tests/common/shielded_scan/scans_for_new_key.rs @@ -0,0 +1,120 @@ +//! Test registering and scanning for a new key in the scan task while zebrad is running. +//! +//! This test requires a cached chain state that is partially synchronized past the +//! Sapling activation height and [`REQUIRED_MIN_TIP_HEIGHT`]. +//! +//! export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +//! cargo test scans_for_new_key --release --features="shielded-scan" -- --ignored --nocapture + +use std::time::Duration; + +use color_eyre::{eyre::eyre, Result}; + +use tower::ServiceBuilder; +use zebra_chain::{ + block::Height, + chain_tip::ChainTip, + parameters::{Network, NetworkUpgrade}, +}; +use zebra_scan::{service::ScanTask, storage::Storage, tests::ZECPAGES_SAPLING_VIEWING_KEY}; + +use crate::common::{ + cached_state::start_state_service_with_cache_dir, launch::can_spawn_zebrad_for_test_type, + test_type::TestType, +}; + +/// The minimum required tip height for the cached state in this test. +const REQUIRED_MIN_TIP_HEIGHT: Height = Height(1_000_000); + +/// How long this test waits after registering keys to check if there are any results. +const WAIT_FOR_RESULTS_DURATION: Duration = Duration::from_secs(60); + +/// Initialize Zebra's state service with a cached state, add a new key to the scan task, and +/// check that it stores results for the new key without errors.
+pub(crate) async fn run() -> Result<()> { + let _init_guard = zebra_test::init(); + + let test_type = TestType::UpdateZebraCachedStateNoRpc; + let test_name = "scans_for_new_key"; + let network = Network::Mainnet; + + // Skip the test unless the user specifically asked for it and there is a zebrad_state_path + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { + return Ok(()); + } + + tracing::info!( + ?network, + ?test_type, + "running scans_for_new_key test using zebra state service", + ); + + let zebrad_state_path = test_type + .zebrad_state_path(test_name) + .expect("already checked that there is a cached state path"); + + let shielded_scan_config = zebra_scan::Config::default(); + + let (state_service, _read_state_service, latest_chain_tip, chain_tip_change) = + start_state_service_with_cache_dir(network, zebrad_state_path).await?; + + let chain_tip_height = latest_chain_tip + .best_tip_height() + .ok_or_else(|| eyre!("State directory doesn't have a chain tip block"))?; + + let sapling_activation_height = NetworkUpgrade::Sapling + .activation_height(network) + .expect("there should be an activation height for Mainnet"); + + assert!( + sapling_activation_height < REQUIRED_MIN_TIP_HEIGHT, + "minimum tip height should be above sapling activation height" + ); + + assert!( + REQUIRED_MIN_TIP_HEIGHT < chain_tip_height, + "chain tip height must be above required minimum tip height" + ); + + tracing::info!("opened state service with valid chain tip height, deleting any past keys in db and starting scan task",); + + // Before spawning `ScanTask`, delete past results for the zecpages key, if any. + let mut storage = Storage::new(&shielded_scan_config, network, false); + storage.delete_sapling_keys(vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()]); + + let state = ServiceBuilder::new().buffer(10).service(state_service); + + let mut scan_task = ScanTask::spawn(storage, state, chain_tip_change); + + tracing::info!("started scan task, sending register keys message with zecpages key to start scanning for a new key",); + + scan_task.register_keys( + [(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), None)] + .into_iter() + .collect(), + )?; + + tracing::info!( + ?WAIT_FOR_RESULTS_DURATION, + "sent message, waiting for scan task to add some results", + ); + + // Wait for the scan task to add some results + tokio::time::sleep(WAIT_FOR_RESULTS_DURATION).await; + + // Check that there are some results in the database for the key + + let storage = Storage::new(&shielded_scan_config, network, true); + + let results = storage.sapling_results(&ZECPAGES_SAPLING_VIEWING_KEY.to_string()); + + tracing::info!(?results, "got the results"); + + // Check that some results were added for the zecpages key that was not in the config or the db when ScanTask started. + assert!( + !results.is_empty(), + "there should be results for the newly registered key" + ); + + Ok(()) +} diff --git a/zebrad/tests/common/shielded_scan/subscribe_results.rs b/zebrad/tests/common/shielded_scan/subscribe_results.rs new file mode 100644 index 00000000000..9806619adf9 --- /dev/null +++ b/zebrad/tests/common/shielded_scan/subscribe_results.rs @@ -0,0 +1,107 @@ +//! Test registering and subscribing to the results for a new key in the scan task while zebrad is running. +//! +//! This test requires a cached chain state that is partially synchronized past the +//! Sapling activation height and [`REQUIRED_MIN_TIP_HEIGHT`] +//! +//! export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +//! 
cargo test scan_subscribe_results --features="shielded-scan" -- --ignored --nocapture + +use std::time::Duration; + +use color_eyre::{eyre::eyre, Result}; + +use tower::ServiceBuilder; +use zebra_chain::{ + block::Height, + chain_tip::ChainTip, + parameters::{Network, NetworkUpgrade}, +}; + +use zebra_scan::{service::ScanTask, storage::Storage, tests::ZECPAGES_SAPLING_VIEWING_KEY}; + +use crate::common::{ + cached_state::start_state_service_with_cache_dir, launch::can_spawn_zebrad_for_test_type, + test_type::TestType, +}; + +/// The minimum required tip height for the cached state in this test. +const REQUIRED_MIN_TIP_HEIGHT: Height = Height(1_000_000); + +/// How long this test waits for a result before failing. +const WAIT_FOR_RESULTS_DURATION: Duration = Duration::from_secs(30 * 60); + +/// Initialize Zebra's state service with a cached state, register a new key with the scan task, +/// subscribe to its results, and check that the scanner sends a result for the new key without errors. +pub(crate) async fn run() -> Result<()> { + let _init_guard = zebra_test::init(); + + let test_type = TestType::UpdateZebraCachedStateNoRpc; + let test_name = "scan_subscribe_results"; + let network = Network::Mainnet; + + // Skip the test unless the user specifically asked for it and there is a zebrad_state_path + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { + return Ok(()); + } + + tracing::info!( + ?network, + ?test_type, + "running scan_subscribe_results test using zebra state service", + ); + + let zebrad_state_path = test_type + .zebrad_state_path(test_name) + .expect("already checked that there is a cached state path"); + + let (state_service, _read_state_service, latest_chain_tip, chain_tip_change) = + start_state_service_with_cache_dir(network, zebrad_state_path).await?; + + let chain_tip_height = latest_chain_tip + .best_tip_height() + .ok_or_else(|| eyre!("State directory doesn't have a chain tip block"))?; + + let sapling_activation_height = NetworkUpgrade::Sapling + .activation_height(network) + .expect("there should be an activation height for Mainnet"); + + assert!( + sapling_activation_height < REQUIRED_MIN_TIP_HEIGHT, + "minimum tip height should be above sapling activation height" + ); + + assert!( + REQUIRED_MIN_TIP_HEIGHT < chain_tip_height, + "chain tip height must be above required minimum tip height" + ); + + tracing::info!("opened state service with valid chain tip height, starting scan task",); + + let state = ServiceBuilder::new().buffer(10).service(state_service); + + // Create an ephemeral `Storage` instance + let storage = Storage::new(&zebra_scan::Config::ephemeral(), network, false); + let mut scan_task = ScanTask::spawn(storage, state, chain_tip_change); + + tracing::info!("started scan task, sending register/subscribe keys messages with zecpages key to start scanning for a new key",); + + let keys = [ZECPAGES_SAPLING_VIEWING_KEY.to_string()]; + scan_task.register_keys( + keys.iter() + .cloned() + .map(|key| (key, Some(780_000))) + .collect(), + )?; + + let mut result_receiver = scan_task + .subscribe(keys.into_iter().collect()) + .await + .expect("should send and receive message successfully"); + + // Wait for the scanner to send a result in the channel + let result = tokio::time::timeout(WAIT_FOR_RESULTS_DURATION, result_receiver.recv()).await?; + + tracing::info!(?result, "received a result from the channel"); + + Ok(()) +} diff --git a/zebrad/tests/common/sync.rs index 599a10c4b5a..6f0b5c32c9d 100644 --- a/zebrad/tests/common/sync.rs +++
b/zebrad/tests/common/sync.rs @@ -164,10 +164,6 @@ impl MempoolBehavior { /// /// If `reuse_tempdir` is supplied, use it as the test's temporary directory. /// -/// If `height` is higher than the mandatory checkpoint, -/// configures `zebrad` to preload the Zcash parameters. -/// If it is lower, skips the parameter preload. -/// /// Configures `zebrad` to debug-enable the mempool based on `mempool_behavior`, /// then check the logs for the expected `mempool_behavior`. /// @@ -211,11 +207,6 @@ pub fn sync_until( config.mempool.debug_enable_at_height = mempool_behavior.enable_at_height(); config.consensus.checkpoint_sync = checkpoint_sync; - // Download the parameters at launch, if we're going to need them later. - if height > network.mandatory_checkpoint_height() { - config.consensus.debug_skip_parameter_preload = false; - } - // Use the default lookahead limit if we're syncing lots of blocks. // (Most tests use a smaller limit to minimise redundant block downloads.) if height > MIN_HEIGHT_FOR_DEFAULT_LOOKAHEAD { @@ -356,9 +347,6 @@ pub fn cached_mandatory_checkpoint_test_config(network: Network) -> Result Result Result<()> { @@ -389,7 +376,6 @@ pub fn create_cached_database_height( let mut config = cached_mandatory_checkpoint_test_config(network)?; // TODO: add convenience methods? config.state.debug_stop_at_height = Some(height.0); - config.consensus.debug_skip_parameter_preload = debug_skip_parameter_preload; config.consensus.checkpoint_sync = checkpoint_sync; let dir = PathBuf::from("/zebrad-cache"); diff --git a/zebrad/tests/common/test_type.rs b/zebrad/tests/common/test_type.rs index ddd7386c137..bb4291c3db7 100644 --- a/zebrad/tests/common/test_type.rs +++ b/zebrad/tests/common/test_type.rs @@ -191,10 +191,6 @@ impl TestType { Err(error) => return Some(Err(error)), }; - // We want to preload the consensus parameters, - // except when we're doing the quick empty state test - config.consensus.debug_skip_parameter_preload = !self.needs_zebra_cached_state(); - // We want to run multi-threaded RPCs, if we're using them if self.launches_lightwalletd() { // Automatically runs one thread per available CPU core