diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 0dd640186bb8..57a76ad2c857 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -33,6 +33,7 @@ variables:
   GIT_DEPTH: 100
   CI_SERVER_NAME: "GitLab CI"
   CI_IMAGE: "paritytech/ci-linux:production"
+  BUILDAH_IMAGE: "quay.io/buildah/stable:v1.27"
   DOCKER_OS: "debian:stretch"
   ARCH: "x86_64"
   ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.2.78"
@@ -40,6 +41,13 @@ variables:

 default:
   cache: {}
+  retry:
+    max: 2
+    when:
+      - runner_system_failure
+      - unknown_failure
+      - api_failure
+  interruptible: true

 .collect-artifacts:
   artifacts:
@@ -72,25 +80,12 @@ default:
       dotenv: pipeline-stopper.env

 .kubernetes-env:
-  retry:
-    max: 2
-    when:
-      - runner_system_failure
-      - unknown_failure
-      - api_failure
-  interruptible: true
+  image: "${CI_IMAGE}"
   tags:
     - kubernetes-parity-build

 .docker-env:
   image: "${CI_IMAGE}"
-  retry:
-    max: 2
-    when:
-      - runner_system_failure
-      - unknown_failure
-      - api_failure
-  interruptible: true
   tags:
     - linux-docker

@@ -150,9 +145,6 @@ default:
     - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1

 .build-push-image:
-  extends:
-    - .kubernetes-env
-  image: quay.io/buildah/stable
   before_script:
     - test -s ./artifacts/VERSION || exit 1
     - test -s ./artifacts/EXTRATAG || exit 1
@@ -196,8 +188,6 @@ include:
   # zombienet jobs
   - scripts/ci/gitlab/pipeline/zombienet.yml
-
-

 #### stage: .post

 deploy-parity-testnet:
@@ -227,8 +217,24 @@ deploy-parity-testnet:
     PR_NUM: "${PR_NUM}"
   trigger:
     project: "parity/infrastructure/ci_cd/pipeline-stopper"
+    branch: "as-improve"
+
+remove-cancel-pipeline-message:
+  stage: .post
+  rules:
+    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
+  variables:
+    PROJECT_ID: "${CI_PROJECT_ID}"
+    PROJECT_NAME: "${CI_PROJECT_NAME}"
+    PIPELINE_ID: "${CI_PIPELINE_ID}"
+    FAILED_JOB_URL: "https://gitlab.com"
+    FAILED_JOB_NAME: "nope"
+    PR_NUM: "${CI_COMMIT_REF_NAME}"
+  trigger:
+    project: "parity/infrastructure/ci_cd/pipeline-stopper"
+    branch: "as-improve"

-.cancel-pipeline-test-linux-stable:
+cancel-pipeline-test-linux-stable:
   extends: .cancel-pipeline-template
   needs:
     - job: test-linux-stable
diff --git a/Cargo.lock b/Cargo.lock index 762220d287fe..d2619143ddc1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -428,7 +428,7 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "async-trait", @@ -465,7 +465,7 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "beefy-gadget", "beefy-primitives", @@ -485,7 +485,7 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "beefy-primitives", "sp-api", @@ -495,7 +495,7 @@ dependencies = [ [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -2032,7 +2032,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", ] @@ -2055,7 +2055,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -2078,7 +2078,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "Inflector", "array-bytes", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2141,7 +2141,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2157,7 +2157,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -2186,7 +2186,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "bitflags", "frame-metadata", @@ -2218,7 +2218,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "Inflector", "cfg-expr", @@ -2232,7 +2232,7 @@ 
dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2244,7 +2244,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro2", "quote", @@ -2254,7 +2254,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-support-test-pallet", @@ -2277,7 +2277,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -2288,7 +2288,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "log", @@ -2306,7 +2306,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -2321,7 +2321,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "sp-api", @@ -2330,7 +2330,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "parity-scale-codec 3.2.1", @@ -2507,7 +2507,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "chrono", "frame-election-provider-support", 
@@ -4673,7 +4673,7 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4687,7 +4687,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -4703,7 +4703,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -4718,7 +4718,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4742,7 +4742,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4762,7 +4762,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-election-provider-support", "frame-support", @@ -4781,7 +4781,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4796,7 +4796,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "beefy-primitives", "frame-support", @@ -4812,7 +4812,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" 
dependencies = [ "array-bytes", "beefy-merkle-tree", @@ -4835,7 +4835,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4853,7 +4853,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4872,7 +4872,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4889,7 +4889,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "assert_matches", "frame-benchmarking", @@ -4906,7 +4906,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4924,7 +4924,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4948,7 +4948,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4961,7 +4961,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -4979,7 +4979,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4997,7 +4997,7 @@ dependencies = [ [[package]] name = "pallet-gilt" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5012,7 +5012,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5035,7 +5035,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5051,7 +5051,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5071,7 +5071,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5088,7 +5088,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5105,7 +5105,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", @@ -5123,7 +5123,7 @@ dependencies = [ [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "anyhow", "jsonrpsee", @@ -5139,7 +5139,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5155,7 +5155,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -5172,7 +5172,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5192,7 +5192,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "sp-api", @@ -5202,7 +5202,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -5219,7 +5219,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5242,7 +5242,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5259,7 +5259,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5274,7 +5274,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5292,7 +5292,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5307,7 +5307,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5325,7 +5325,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5341,7 +5341,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -5362,7 +5362,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5378,7 +5378,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -5392,7 +5392,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5415,7 +5415,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5426,7 +5426,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "log", "sp-arithmetic", @@ -5435,7 +5435,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5452,7 +5452,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -5466,7 +5466,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5484,7 +5484,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5503,7 +5503,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-support", "frame-system", @@ -5519,7 +5519,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -5535,7 +5535,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "pallet-transaction-payment", "parity-scale-codec 3.2.1", @@ -5547,7 +5547,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5564,7 +5564,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5580,7 +5580,7 @@ dependencies = [ [[package]] name = "pallet-vesting" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -5595,7 +5595,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-benchmarking", "frame-support", @@ -8117,7 +8117,7 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "env_logger", "log", @@ -8471,7 +8471,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "log", "sp-core", @@ -8482,7 +8482,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures", @@ -8509,7 +8509,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "futures-timer", @@ -8532,7 +8532,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "sc-client-api", @@ -8548,7 +8548,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "impl-trait-for-tuples", "memmap2", @@ -8565,7 +8565,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8576,7 +8576,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "chrono", @@ -8616,7 +8616,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "fnv", "futures", @@ -8644,7 +8644,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "hash-db", "kvdb", @@ -8669,7 +8669,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures", @@ -8693,7 +8693,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "fork-tree", @@ -8734,7 +8734,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "jsonrpsee", @@ -8756,7 +8756,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "fork-tree", "parity-scale-codec 3.2.1", @@ -8769,7 +8769,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures", @@ -8793,7 +8793,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "lazy_static", "lru", @@ -8819,7 +8819,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "environmental", "parity-scale-codec 3.2.1", @@ -8835,7 +8835,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "log", "parity-scale-codec 3.2.1", @@ -8850,7 +8850,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "cfg-if", "libc", @@ -8870,7 +8870,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "ahash", "array-bytes", @@ -8911,7 +8911,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "finality-grandpa", "futures", @@ -8932,7 +8932,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "ansi_term", "futures", @@ -8949,7 +8949,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "async-trait", @@ -8964,7 +8964,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "async-trait", @@ -9011,7 +9011,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "cid", "futures", @@ -9031,7 +9031,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies 
= [ "async-trait", "bitflags", @@ -9057,7 +9057,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "ahash", "futures", @@ -9075,7 +9075,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "futures", @@ -9096,9 +9096,10 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", + "async-trait", "fork-tree", "futures", "libp2p", @@ -9126,7 +9127,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "futures", @@ -9145,7 +9146,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "bytes", @@ -9175,7 +9176,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "libp2p", @@ -9188,7 +9189,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9197,7 +9198,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "hash-db", @@ -9227,7 +9228,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "jsonrpsee", @@ -9250,7 +9251,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "jsonrpsee", @@ -9263,7 +9264,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "hex", @@ -9282,7 +9283,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "directories", @@ -9353,7 +9354,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "log", "parity-scale-codec 3.2.1", @@ -9367,7 +9368,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "jsonrpsee", "parity-scale-codec 3.2.1", @@ -9386,7 +9387,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "libc", @@ -9405,7 +9406,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "chrono", "futures", @@ -9423,7 +9424,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "ansi_term", "atty", @@ -9454,7 +9455,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9465,7 +9466,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures", @@ -9492,7 +9493,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures", @@ -9506,7 +9507,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "futures-timer", @@ -9989,7 +9990,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "hash-db", "log", @@ -10007,7 +10008,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "blake2", "proc-macro-crate", @@ -10018,8 +10019,8 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -10031,8 +10032,8 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "integer-sqrt", "num-traits", @@ -10047,7 +10048,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -10060,7 +10061,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "parity-scale-codec 3.2.1", @@ -10072,7 +10073,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "sp-api", @@ -10084,7 +10085,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "log", @@ -10102,7 +10103,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures", @@ -10121,7 +10122,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "merlin", @@ -10144,7 +10145,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -10158,7 +10159,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -10170,8 +10171,8 @@ dependencies = [ [[package]] name = "sp-core" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "base58", @@ -10215,8 +10216,8 @@ dependencies = [ [[package]] name = "sp-core-hashing" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "blake2", "byteorder", @@ -10230,7 +10231,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro2", "quote", @@ -10241,7 +10242,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10249,8 +10250,8 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro2", "quote", @@ -10259,8 +10260,8 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "0.13.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "environmental", "parity-scale-codec 3.2.1", @@ -10271,7 +10272,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "finality-grandpa", "log", @@ -10289,7 +10290,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10302,8 +10303,8 @@ dependencies = [ [[package]] name = "sp-io" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "bytes", "futures", @@ -10328,8 +10329,8 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "lazy_static", "sp-core", @@ -10339,8 +10340,8 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "0.13.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures", @@ -10357,7 +10358,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "thiserror", "zstd", @@ -10366,7 +10367,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "log", "parity-scale-codec 3.2.1", @@ -10383,7 +10384,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -10397,7 +10398,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "sp-api", "sp-core", @@ -10406,8 +10407,8 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "backtrace", "lazy_static", @@ -10417,7 +10418,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "rustc-hash", "serde", @@ -10426,8 +10427,8 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "either", "hash256-std-hasher", @@ -10449,8 +10450,8 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -10467,8 +10468,8 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "Inflector", "proc-macro-crate", @@ -10480,7 +10481,7 @@ dependencies = [ [[package]] name = "sp-sandbox" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "log", "parity-scale-codec 3.2.1", @@ -10494,7 +10495,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = 
"git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -10508,7 +10509,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "scale-info", @@ -10518,8 +10519,8 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "0.13.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "hash-db", "log", @@ -10540,13 +10541,13 @@ dependencies = [ [[package]] name = "sp-std" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" [[package]] name = "sp-storage" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "impl-serde", "parity-scale-codec 3.2.1", @@ -10559,7 +10560,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "futures-timer", @@ -10574,8 +10575,8 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "sp-std", @@ -10587,7 +10588,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "sp-api", "sp-runtime", @@ -10596,7 +10597,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "log", @@ -10611,8 +10612,8 @@ dependencies = [ [[package]] name = "sp-trie" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" 
dependencies = [ "ahash", "hash-db", @@ -10635,7 +10636,7 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "impl-serde", "parity-scale-codec 3.2.1", @@ -10652,7 +10653,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "parity-scale-codec 3.2.1", "proc-macro2", @@ -10662,8 +10663,8 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +version = "7.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "impl-trait-for-tuples", "log", @@ -10676,7 +10677,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec 3.2.1", @@ -10891,7 +10892,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "platforms", ] @@ -10899,7 +10900,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -10920,7 +10921,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures-util", "hyper", @@ -10933,7 +10934,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "async-trait", "jsonrpsee", @@ -10946,7 +10947,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" 
dependencies = [ "jsonrpsee", "log", @@ -10967,7 +10968,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "array-bytes", "async-trait", @@ -10993,7 +10994,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11003,7 +11004,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11014,7 +11015,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "ansi_term", "build-helper", @@ -11708,7 +11709,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" +source = "git+https://github.com/paritytech/substrate?branch=master#39ef178c439e606e608b4b27d435b45e6692fa0a" dependencies = [ "clap", "frame-try-runtime", diff --git a/Cargo.toml b/Cargo.toml index 837539ad57f5..22021b376001 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,7 +127,6 @@ maintenance = { status = "actively-developed" } # This list is ordered alphabetically. 
[profile.dev.package] blake2 = { opt-level = 3 } -blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index c1fe95795328..13739d396b24 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -43,8 +43,7 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] -default = ["wasmtime", "db", "cli", "hostperfcheck", "full-node", "trie-memory-tracker", "polkadot-native"] -wasmtime = ["sc-cli/wasmtime"] +default = ["db", "cli", "hostperfcheck", "full-node", "trie-memory-tracker", "polkadot-native"] db = ["service/db"] cli = [ "clap", diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index cfebd1065edc..bc63549795c2 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1144,7 +1144,7 @@ async fn handle_from_overseer( FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { let mut actions = Vec::new(); - for activated in update.activated { + if let Some(activated) = update.activated { let head = activated.hash; match import::handle_new_head(ctx, state, db, head, &*last_finalized_height).await { Err(e) => return Err(SubsystemError::with_origin("db", e)), diff --git a/node/core/av-store/src/metrics.rs b/node/core/av-store/src/metrics.rs index c50932c6173e..fedeb2b7d0e5 100644 --- a/node/core/av-store/src/metrics.rs +++ b/node/core/av-store/src/metrics.rs @@ -140,10 +140,16 @@ impl metrics::Metrics for Metrics { registry, )?, get_chunk: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_av_store_get_chunk", - "Time spent fetching requested chunks.`", - ))?, + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_av_store_get_chunk", + "Time spent fetching requested chunks.`", + ) + .buckets(vec![ + 0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, + 0.5, 1.0, 2.5, 5.0, 10.0, + ]), + )?, registry, )?, }; diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 13a3dd28705f..64cbb18cc44c 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -225,7 +225,7 @@ async fn run( } } - for leaf in update.activated { + if let Some(leaf) = update.activated { let sender = ctx.sender().clone(); let leaf_hash = leaf.hash; diff --git a/node/core/bitfield-signing/src/metrics.rs b/node/core/bitfield-signing/src/metrics.rs index ab4e73be0eeb..571a0c335bd7 100644 --- a/node/core/bitfield-signing/src/metrics.rs +++ b/node/core/bitfield-signing/src/metrics.rs @@ -50,10 +50,16 @@ impl metrics::Metrics for Metrics { registry, )?, run: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_bitfield_signing_run", - "Time spent within `bitfield_signing::run`", - ))?, + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_bitfield_signing_run", + "Time spent within `bitfield_signing::run`", + ) + .buckets(vec![ + 0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, + 0.5, 1.0, 2.5, 5.0, 10.0, + ]), + )?, registry, )?, }; diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index eb5ceac9b768..e5ffe6811d6e 100644 --- 
a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -430,7 +430,7 @@ where return Ok(()) } FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - for leaf in update.activated { + if let Some(leaf) = update.activated { let write_ops = handle_active_leaf( ctx.sender(), &*backend, diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs index bb1456a59745..ab571108af37 100644 --- a/node/core/dispute-coordinator/src/db/v1.rs +++ b/node/core/dispute-coordinator/src/db/v1.rs @@ -32,7 +32,7 @@ use crate::{ backend::{Backend, BackendWriteOp, OverlayedBackend}, error::{FatalError, FatalResult}, metrics::Metrics, - DISPUTE_WINDOW, LOG_TARGET, + LOG_TARGET, }; const RECENT_DISPUTES_KEY: &[u8; 15] = b"recent-disputes"; @@ -318,25 +318,24 @@ pub(crate) fn load_recent_disputes( /// /// If one or more ancient sessions are pruned, all metadata on candidates within the ancient /// session will be deleted. -pub(crate) fn note_current_session( +pub(crate) fn note_earliest_session( overlay_db: &mut OverlayedBackend<'_, impl Backend>, - current_session: SessionIndex, + new_earliest_session: SessionIndex, ) -> SubsystemResult<()> { - let new_earliest = current_session.saturating_sub(DISPUTE_WINDOW.get()); match overlay_db.load_earliest_session()? { None => { // First launch - write new-earliest. - overlay_db.write_earliest_session(new_earliest); + overlay_db.write_earliest_session(new_earliest_session); }, - Some(prev_earliest) if new_earliest > prev_earliest => { + Some(prev_earliest) if new_earliest_session > prev_earliest => { // Prune all data in the outdated sessions. - overlay_db.write_earliest_session(new_earliest); + overlay_db.write_earliest_session(new_earliest_session); // Clear recent disputes metadata. { let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); - let lower_bound = (new_earliest, CandidateHash(Hash::repeat_byte(0x00))); + let lower_bound = (new_earliest_session, CandidateHash(Hash::repeat_byte(0x00))); let new_recent_disputes = recent_disputes.split_off(&lower_bound); // Any remanining disputes are considered ancient and must be pruned. 
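
The `note_earliest_session` hunk above moves the `DISPUTE_WINDOW` arithmetic out to the caller and prunes ancient sessions by splitting the recent-disputes map at a `(session, zero-hash)` lower bound. The following is a minimal, self-contained sketch of that pruning step only; the type aliases are placeholders for illustration, not the crate's own `SessionIndex`/`CandidateHash` definitions.

use std::collections::BTreeMap;

type SessionIndex = u32;
type CandidateHash = [u8; 32];

// Stand-in for the on-disk "recent disputes" column, keyed by
// (session index, candidate hash) just like in the hunk above.
type RecentDisputes = BTreeMap<(SessionIndex, CandidateHash), &'static str>;

/// Drop every dispute belonging to a session older than `new_earliest_session`.
/// Mirrors the `split_off` at a zeroed candidate-hash lower bound.
fn prune_ancient(disputes: &mut RecentDisputes, new_earliest_session: SessionIndex) {
    let lower_bound = (new_earliest_session, [0u8; 32]);
    // `split_off` keeps everything >= lower_bound in the returned map; what is
    // left behind is the ancient part, which we simply discard.
    let kept = disputes.split_off(&lower_bound);
    *disputes = kept;
}

fn main() {
    let mut disputes = RecentDisputes::new();
    disputes.insert((1, [0xaa; 32]), "ancient");
    disputes.insert((5, [0xbb; 32]), "recent");
    prune_ancient(&mut disputes, 3);
    assert!(disputes.keys().all(|(session, _)| *session >= 3));
    println!("{} dispute(s) kept", disputes.len());
}
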
@@ -373,6 +372,7 @@ mod tests { use super::*; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; + use polkadot_node_primitives::DISPUTE_WINDOW; use polkadot_primitives::v2::{Hash, Id as ParaId}; fn make_db() -> DbBackend { @@ -422,7 +422,7 @@ mod tests { let mut overlay_db = OverlayedBackend::new(&backend); gum::trace!(target: LOG_TARGET, ?current_session, "Noting current session"); - note_current_session(&mut overlay_db, current_session).unwrap(); + note_earliest_session(&mut overlay_db, earliest_session).unwrap(); let write_ops = overlay_db.into_write_ops(); backend.write(write_ops).unwrap(); @@ -442,7 +442,7 @@ mod tests { let current_session = current_session + 1; let earliest_session = earliest_session + 1; - note_current_session(&mut overlay_db, current_session).unwrap(); + note_earliest_session(&mut overlay_db, earliest_session).unwrap(); let write_ops = overlay_db.into_write_ops(); backend.write(write_ops).unwrap(); @@ -599,7 +599,7 @@ mod tests { } #[test] - fn note_current_session_prunes_old() { + fn note_earliest_session_prunes_old() { let mut backend = make_db(); let hash_a = CandidateHash(Hash::repeat_byte(0x0a)); @@ -648,7 +648,7 @@ mod tests { backend.write(write_ops).unwrap(); let mut overlay_db = OverlayedBackend::new(&backend); - note_current_session(&mut overlay_db, current_session).unwrap(); + note_earliest_session(&mut overlay_db, new_earliest_session).unwrap(); assert_eq!(overlay_db.load_earliest_session().unwrap(), Some(new_earliest_session)); diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index 8eb3d173dcf7..c0f0d3d9e009 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -28,7 +28,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; -use polkadot_node_primitives::{CandidateVotes, SignedDisputeStatement}; +use polkadot_node_primitives::{CandidateVotes, DisputeStatus, SignedDisputeStatement, Timestamp}; use polkadot_node_subsystem_util::rolling_session_window::RollingSessionWindow; use polkadot_primitives::v2::{ CandidateReceipt, DisputeStatement, IndexedVec, SessionIndex, SessionInfo, @@ -154,22 +154,8 @@ pub struct CandidateVoteState { /// Information about own votes: own_vote: OwnVoteState, - /// Whether or not the dispute concluded invalid. - concluded_invalid: bool, - - /// Whether or not the dispute concluded valid. - /// - /// Note: Due to equivocations it is technically possible for a dispute to conclude both valid - /// and invalid. In that case the invalid result takes precedence. - concluded_valid: bool, - - /// There is an ongoing dispute and we reached f+1 votes -> the dispute is confirmed - /// - /// as at least one honest validator cast a vote for the candidate. - is_confirmed: bool, - - /// Whether or not we have an ongoing dispute. - is_disputed: bool, + /// Current dispute status, if there is any. + dispute_status: Option, } impl CandidateVoteState { @@ -179,18 +165,11 @@ impl CandidateVoteState { pub fn new_from_receipt(candidate_receipt: CandidateReceipt) -> Self { let votes = CandidateVotes { candidate_receipt, valid: BTreeMap::new(), invalid: BTreeMap::new() }; - Self { - votes, - own_vote: OwnVoteState::NoVote, - concluded_invalid: false, - concluded_valid: false, - is_confirmed: false, - is_disputed: false, - } + Self { votes, own_vote: OwnVoteState::NoVote, dispute_status: None } } /// Create a new `CandidateVoteState` from already existing votes. 
- pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>) -> Self { + pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>, now: Timestamp) -> Self { let own_vote = OwnVoteState::new(&votes, env); let n_validators = env.validators().len(); @@ -198,16 +177,31 @@ impl CandidateVoteState { let supermajority_threshold = polkadot_primitives::v2::supermajority_threshold(n_validators); - let concluded_invalid = votes.invalid.len() >= supermajority_threshold; - let concluded_valid = votes.valid.len() >= supermajority_threshold; - // We have a dispute, if we have votes on both sides: let is_disputed = !votes.invalid.is_empty() && !votes.valid.is_empty(); - let byzantine_threshold = polkadot_primitives::v2::byzantine_threshold(n_validators); - let is_confirmed = votes.voted_indices().len() > byzantine_threshold && is_disputed; + let dispute_status = if is_disputed { + let mut status = DisputeStatus::active(); + let byzantine_threshold = polkadot_primitives::v2::byzantine_threshold(n_validators); + let is_confirmed = votes.voted_indices().len() > byzantine_threshold; + if is_confirmed { + status = status.confirm(); + }; + let concluded_for = votes.valid.len() >= supermajority_threshold; + if concluded_for { + status = status.conclude_for(now); + }; + + let concluded_against = votes.invalid.len() >= supermajority_threshold; + if concluded_against { + status = status.conclude_against(now); + }; + Some(status) + } else { + None + }; - Self { votes, own_vote, concluded_invalid, concluded_valid, is_confirmed, is_disputed } + Self { votes, own_vote, dispute_status } } /// Import fresh statements. @@ -217,6 +211,7 @@ impl CandidateVoteState { self, env: &CandidateEnvironment, statements: Vec<(SignedDisputeStatement, ValidatorIndex)>, + now: Timestamp, ) -> ImportResult { let (mut votes, old_state) = self.into_old_state(); @@ -294,7 +289,7 @@ impl CandidateVoteState { } } - let new_state = Self::new(votes, env); + let new_state = Self::new(votes, env, now); ImportResult { old_state, @@ -313,32 +308,15 @@ impl CandidateVoteState { /// Extract `CandidateVotes` for handling import of new statements. fn into_old_state(self) -> (CandidateVotes, CandidateVoteState<()>) { - let CandidateVoteState { - votes, - own_vote, - concluded_invalid, - concluded_valid, - is_confirmed, - is_disputed, - } = self; - ( - votes, - CandidateVoteState { - votes: (), - own_vote, - concluded_invalid, - concluded_valid, - is_confirmed, - is_disputed, - }, - ) + let CandidateVoteState { votes, own_vote, dispute_status } = self; + (votes, CandidateVoteState { votes: (), own_vote, dispute_status }) } } impl CandidateVoteState { /// Whether or not we have an ongoing dispute. pub fn is_disputed(&self) -> bool { - self.is_disputed + self.dispute_status.is_some() } /// Whether there is an ongoing confirmed dispute. @@ -346,7 +324,7 @@ impl CandidateVoteState { /// This checks whether there is a dispute ongoing and we have more than byzantine threshold /// votes. pub fn is_confirmed(&self) -> bool { - self.is_confirmed + self.dispute_status.map_or(false, |s| s.is_confirmed_concluded()) } /// This machine already cast some vote in that dispute/for that candidate. @@ -359,14 +337,19 @@ impl CandidateVoteState { self.own_vote.approval_votes() } - /// Whether or not this dispute has already enough valid votes to conclude. - pub fn is_concluded_valid(&self) -> bool { - self.concluded_valid + /// Whether or not there is a dispute and it has already enough valid votes to conclude. 
+ pub fn has_concluded_for(&self) -> bool { + self.dispute_status.map_or(false, |s| s.has_concluded_for()) + } + + /// Whether or not there is a dispute and it has already enough invalid votes to conclude. + pub fn has_concluded_against(&self) -> bool { + self.dispute_status.map_or(false, |s| s.has_concluded_against()) } - /// Whether or not this dispute has already enough invalid votes to conclude. - pub fn is_concluded_invalid(&self) -> bool { - self.concluded_invalid + /// Get access to the dispute status, in case there is one. + pub fn dispute_status(&self) -> &Option { + &self.dispute_status } /// Access to underlying votes. @@ -451,18 +434,18 @@ impl ImportResult { } /// Whether or not any dispute just concluded valid due to the import. - pub fn is_freshly_concluded_valid(&self) -> bool { - !self.old_state().is_concluded_valid() && self.new_state().is_concluded_valid() + pub fn is_freshly_concluded_for(&self) -> bool { + !self.old_state().has_concluded_for() && self.new_state().has_concluded_for() } /// Whether or not any dispute just concluded invalid due to the import. - pub fn is_freshly_concluded_invalid(&self) -> bool { - !self.old_state().is_concluded_invalid() && self.new_state().is_concluded_invalid() + pub fn is_freshly_concluded_against(&self) -> bool { + !self.old_state().has_concluded_against() && self.new_state().has_concluded_against() } /// Whether or not any dispute just concluded either invalid or valid due to the import. pub fn is_freshly_concluded(&self) -> bool { - self.is_freshly_concluded_invalid() || self.is_freshly_concluded_valid() + self.is_freshly_concluded_against() || self.is_freshly_concluded_for() } /// Modify this `ImportResult`s, by importing additional approval votes. @@ -473,6 +456,7 @@ impl ImportResult { self, env: &CandidateEnvironment, approval_votes: HashMap, + now: Timestamp, ) -> Self { let Self { old_state, @@ -508,7 +492,7 @@ impl ImportResult { } } - let new_state = CandidateVoteState::new(votes, env); + let new_state = CandidateVoteState::new(votes, env, now); Self { old_state, diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 901a6d863ed2..4a2076a225be 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -27,7 +27,7 @@ use sc_keystore::LocalKeystore; use polkadot_node_primitives::{ CandidateVotes, DisputeMessage, DisputeMessageCheckError, DisputeStatus, - SignedDisputeStatement, Timestamp, DISPUTE_WINDOW, + SignedDisputeStatement, Timestamp, }; use polkadot_node_subsystem::{ messages::{ @@ -299,7 +299,7 @@ impl Initialized { self.highest_session = session; - db::v1::note_current_session(overlay_db, session)?; + db::v1::note_earliest_session(overlay_db, new_window_start)?; self.spam_slots.prune_old(new_window_start); } }, @@ -617,7 +617,9 @@ impl Initialized { let _ = tx.send( get_active_with_status(recent_disputes.into_iter(), now) - .map(|(k, _)| k) + .map(|((session_idx, candidate_hash), dispute_status)| { + (session_idx, candidate_hash, dispute_status) + }) .collect(), ); }, @@ -706,8 +708,8 @@ impl Initialized { now: Timestamp, ) -> Result { gum::trace!(target: LOG_TARGET, ?statements, "In handle import statements"); - if session + DISPUTE_WINDOW.get() < self.highest_session { - // It is not valid to participate in an ancient dispute (spam?). + if !self.rolling_session_window.contains(session) { + // It is not valid to participate in an ancient dispute (spam?) or too new. 
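
The import path above now derives a single `dispute_status: Option<DisputeStatus>` instead of four independent booleans, and rejects statements for sessions outside the rolling window. Below is a simplified model of how such a status can be derived from raw vote tallies. The enum, the thresholds (assumed f = (n - 1) / 3 and supermajority = n - f), and the use of valid + invalid as the voter count are illustrative assumptions, not the `polkadot-node-primitives` API; the real code counts distinct voters for confirmation.

type Timestamp = u64;

#[derive(Debug, Clone, Copy, PartialEq)]
enum Status {
    Active,
    Confirmed,
    ConcludedFor(Timestamp),
    ConcludedAgainst(Timestamp),
}

fn derive_status(
    valid_votes: usize,
    invalid_votes: usize,
    n_validators: usize,
    now: Timestamp,
) -> Option<Status> {
    // No dispute unless both sides have at least one vote; a one-sided
    // supermajority is not a dispute (cf. the new `no_onesided_disputes` test).
    if valid_votes == 0 || invalid_votes == 0 {
        return None;
    }
    // Assumed thresholds: f = (n - 1) / 3, supermajority = n - f.
    let byzantine_threshold = n_validators.saturating_sub(1) / 3;
    let supermajority_threshold = n_validators - byzantine_threshold;

    let mut status = Status::Active;
    if valid_votes + invalid_votes > byzantine_threshold {
        status = Status::Confirmed;
    }
    if valid_votes >= supermajority_threshold {
        status = Status::ConcludedFor(now);
    }
    // Applied last, so "concluded against" wins if both sides somehow reach a
    // supermajority (only possible through equivocations).
    if invalid_votes >= supermajority_threshold {
        status = Status::ConcludedAgainst(now);
    }
    Some(status)
}

fn main() {
    assert_eq!(derive_status(9, 0, 10, 42), None); // one-sided: nothing recorded
    assert_eq!(derive_status(2, 1, 10, 42), Some(Status::Active)); // disputed, below f + 1 voters
    assert_eq!(derive_status(3, 1, 10, 42), Some(Status::Confirmed)); // more than f voted
    assert_eq!(derive_status(7, 1, 10, 42), Some(Status::ConcludedFor(42))); // valid supermajority
}

The sketch only mirrors the ordering of the checks in the patch; the real `DisputeStatus` carries its own transition methods (`confirm`, `conclude_for`, `conclude_against`) and timestamps.
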
return Ok(ImportStatementsResult::InvalidImport) } @@ -748,7 +750,7 @@ impl Initialized { .load_candidate_votes(session, &candidate_hash)? .map(CandidateVotes::from) { - Some(votes) => CandidateVoteState::new(votes, &env), + Some(votes) => CandidateVoteState::new(votes, &env, now), None => if let MaybeCandidateReceipt::Provides(candidate_receipt) = candidate_receipt { CandidateVoteState::new_from_receipt(candidate_receipt) @@ -766,7 +768,7 @@ impl Initialized { gum::trace!(target: LOG_TARGET, ?candidate_hash, ?session, "Loaded votes"); let import_result = { - let intermediate_result = old_state.import_statements(&env, statements); + let intermediate_result = old_state.import_statements(&env, statements, now); // Handle approval vote import: // @@ -803,7 +805,7 @@ impl Initialized { ); intermediate_result }, - Ok(votes) => intermediate_result.import_approval_votes(&env, votes), + Ok(votes) => intermediate_result.import_approval_votes(&env, votes, now), } } else { gum::trace!( @@ -977,43 +979,34 @@ impl Initialized { } // All good, update recent disputes if state has changed: - if import_result.dispute_state_changed() { - let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); + if let Some(new_status) = new_state.dispute_status() { + // Only bother with db access, if there was an actual change. + if import_result.dispute_state_changed() { + let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); + + let status = + recent_disputes.entry((session, candidate_hash)).or_insert_with(|| { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + session, + "New dispute initiated for candidate.", + ); + DisputeStatus::active() + }); - let status = recent_disputes.entry((session, candidate_hash)).or_insert_with(|| { - gum::info!( + *status = *new_status; + + gum::trace!( target: LOG_TARGET, ?candidate_hash, - session, - "New dispute initiated for candidate.", + ?status, + has_concluded_for = ?new_state.has_concluded_for(), + has_concluded_against = ?new_state.has_concluded_against(), + "Writing recent disputes with updates for candidate" ); - DisputeStatus::active() - }); - - if new_state.is_confirmed() { - *status = status.confirm(); + overlay_db.write_recent_disputes(recent_disputes); } - - // Note: concluded-invalid overwrites concluded-valid, - // so we do this check first. Dispute state machine is - // non-commutative. 
- if new_state.is_concluded_valid() { - *status = status.concluded_for(now); - } - - if new_state.is_concluded_invalid() { - *status = status.concluded_against(now); - } - - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - ?status, - is_concluded_valid = ?new_state.is_concluded_valid(), - is_concluded_invalid = ?new_state.is_concluded_invalid(), - "Writing recent disputes with updates for candidate" - ); - overlay_db.write_recent_disputes(recent_disputes); } // Update metrics: @@ -1036,7 +1029,7 @@ impl Initialized { ); self.metrics.on_approval_votes(import_result.imported_approval_votes()); - if import_result.is_freshly_concluded_valid() { + if import_result.is_freshly_concluded_for() { gum::info!( target: LOG_TARGET, ?candidate_hash, @@ -1045,7 +1038,7 @@ impl Initialized { ); self.metrics.on_concluded_valid(); } - if import_result.is_freshly_concluded_invalid() { + if import_result.is_freshly_concluded_against() { gum::info!( target: LOG_TARGET, ?candidate_hash, diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 09d6c621b999..e7ac66ce2ece 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -30,7 +30,7 @@ use futures::FutureExt; use sc_keystore::LocalKeystore; -use polkadot_node_primitives::{CandidateVotes, DISPUTE_WINDOW}; +use polkadot_node_primitives::CandidateVotes; use polkadot_node_subsystem::{ overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -272,7 +272,7 @@ impl DisputeCoordinatorSubsystem { ChainScraper, )> { // Prune obsolete disputes: - db::v1::note_current_session(overlay_db, rolling_session_window.latest_session())?; + db::v1::note_earliest_session(overlay_db, rolling_session_window.earliest_session())?; let active_disputes = match overlay_db.load_recent_disputes() { Ok(Some(disputes)) => diff --git a/node/core/dispute-coordinator/src/participation/queues/mod.rs b/node/core/dispute-coordinator/src/participation/queues/mod.rs index 3ec217628625..d2fcab1ba258 100644 --- a/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -14,10 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::{ - cmp::Ordering, - collections::{BTreeMap, HashMap}, -}; +use std::{cmp::Ordering, collections::BTreeMap}; use futures::channel::oneshot; use polkadot_node_subsystem::{messages::ChainApiMessage, overseer}; @@ -50,25 +47,14 @@ const PRIORITY_QUEUE_SIZE: usize = 20_000; #[cfg(test)] const PRIORITY_QUEUE_SIZE: usize = 2; -/// Type for counting how often a candidate was added to the best effort queue. -type BestEffortCount = u32; - /// Queues for dispute participation. +/// In both queues we have a strict ordering of candidates and participation will +/// happen in that order. Refer to `CandidateComparator` for details on the ordering. pub struct Queues { /// Set of best effort participation requests. - /// - /// Note that as size is limited to `BEST_EFFORT_QUEUE_SIZE` we simply do a linear search for - /// the entry with the highest `added_count` to determine what dispute to participate next in. - /// - /// This mechanism leads to an amplifying effect - the more validators already participated, - /// the more likely it becomes that more validators will participate soon, which should lead to - /// a quick resolution of disputes, even in the best effort queue. 
- best_effort: HashMap, + best_effort: BTreeMap, /// Priority queue. - /// - /// In the priority queue, we have a strict ordering of candidates and participation will - /// happen in that order. priority: BTreeMap, } @@ -143,14 +129,13 @@ impl ParticipationRequest { impl Queues { /// Create new `Queues`. pub fn new() -> Self { - Self { best_effort: HashMap::new(), priority: BTreeMap::new() } + Self { best_effort: BTreeMap::new(), priority: BTreeMap::new() } } /// Will put message in queue, either priority or best effort depending on priority. /// /// If the message was already previously present on best effort, it will be moved to priority - /// if it considered priority now, otherwise the `added_count` on the best effort queue will be - /// bumped. + /// if it is considered priority now. /// /// Returns error in case a queue was found full already. pub async fn queue( @@ -159,94 +144,76 @@ impl Queues { priority: ParticipationPriority, req: ParticipationRequest, ) -> Result<()> { - let comparator = match priority { - ParticipationPriority::BestEffort => None, - ParticipationPriority::Priority => - CandidateComparator::new(sender, &req.candidate_receipt).await?, - }; - self.queue_with_comparator(comparator, req)?; + let comparator = CandidateComparator::new(sender, &req.candidate_receipt).await?; + + self.queue_with_comparator(comparator, priority, req)?; Ok(()) } - /// Get the next best request for dispute participation - /// - /// if any. Priority queue is always considered first, then the best effort queue based on - /// `added_count`. + /// Get the next best request for dispute participation if any. + /// First the priority queue is considered and then the best effort one. pub fn dequeue(&mut self) -> Option { if let Some(req) = self.pop_priority() { - // In case a candidate became best effort over time, we might have it also queued in - // the best effort queue - get rid of any such entry: - self.best_effort.remove(req.candidate_hash()); - return Some(req) + return Some(req.1) } - self.pop_best_effort() + self.pop_best_effort().map(|d| d.1) } fn queue_with_comparator( &mut self, - comparator: Option, + comparator: CandidateComparator, + priority: ParticipationPriority, req: ParticipationRequest, ) -> std::result::Result<(), QueueError> { - if let Some(comparator) = comparator { + if priority.is_priority() { if self.priority.len() >= PRIORITY_QUEUE_SIZE { return Err(QueueError::PriorityFull) } // Remove any best effort entry: - self.best_effort.remove(&req.candidate_hash); + self.best_effort.remove(&comparator); self.priority.insert(comparator, req); } else { + if self.priority.contains_key(&comparator) { + // The candidate is already in priority queue - don't + // add in in best effort too. + return Ok(()) + } if self.best_effort.len() >= BEST_EFFORT_QUEUE_SIZE { return Err(QueueError::BestEffortFull) } - // Note: The request might have been added to priority in a previous call already, we - // take care of that case in `dequeue` (more efficient). - self.best_effort - .entry(req.candidate_hash) - .or_insert(BestEffortEntry { req, added_count: 0 }) - .added_count += 1; + self.best_effort.insert(comparator, req); } Ok(()) } - /// Get the next best from the best effort queue. - /// - /// If there are multiple best - just pick one. 
- fn pop_best_effort(&mut self) -> Option { - let best = self.best_effort.iter().reduce(|(hash1, entry1), (hash2, entry2)| { - if entry1.added_count > entry2.added_count { - (hash1, entry1) - } else { - (hash2, entry2) - } - }); - if let Some((best_hash, _)) = best { - let best_hash = best_hash.clone(); - self.best_effort.remove(&best_hash).map(|e| e.req) - } else { - None - } + /// Get best from the best effort queue. + fn pop_best_effort(&mut self) -> Option<(CandidateComparator, ParticipationRequest)> { + return Self::pop_impl(&mut self.best_effort) } /// Get best priority queue entry. - fn pop_priority(&mut self) -> Option { + fn pop_priority(&mut self) -> Option<(CandidateComparator, ParticipationRequest)> { + return Self::pop_impl(&mut self.priority) + } + + // `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. This function has + // the extracted implementation + fn pop_impl( + target: &mut BTreeMap, + ) -> Option<(CandidateComparator, ParticipationRequest)> { // Once https://github.com/rust-lang/rust/issues/62924 is there, we can use a simple: - // priority.pop_first(). - if let Some((comparator, _)) = self.priority.iter().next() { + // target.pop_first(). + if let Some((comparator, _)) = target.iter().next() { let comparator = comparator.clone(); - self.priority.remove(&comparator) + target + .remove(&comparator) + .map(|participation_request| (comparator, participation_request)) } else { None } } } -/// Entry for the best effort queue. -struct BestEffortEntry { - req: ParticipationRequest, - /// How often was the above request added to the queue. - added_count: BestEffortCount, -} - /// `Comparator` for ordering of disputes for candidates. /// /// This `comparator` makes it possible to order disputes based on age and to ensure some fairness @@ -266,9 +233,12 @@ struct BestEffortEntry { #[derive(Copy, Clone)] #[cfg_attr(test, derive(Debug))] struct CandidateComparator { - /// Block number of the relay parent. + /// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases when + /// it can't be obtained. For example when the node is lagging behind and new leaves are received + /// with a slight delay. Candidates with unknown relay parent are treated with the lowest priority. /// - /// Important, so we will be participating in oldest disputes first. + /// The order enforced by `CandidateComparator` is important because we want to participate in + /// the oldest disputes first. /// /// Note: In theory it would make more sense to use the `BlockNumber` of the including /// block, as inclusion time is the actual relevant event when it comes to ordering. The @@ -277,8 +247,10 @@ struct CandidateComparator { /// just using the lowest `BlockNumber` of all available including blocks - the problem is, /// that is not stable. If a new fork appears after the fact, we would start ordering the same /// candidate differently, which would result in the same candidate getting queued twice. - relay_parent_block_number: BlockNumber, - /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates. + relay_parent_block_number: Option, + /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with the + /// same relay parent block number. Candidates without `relay_parent_block_number` are ordered by + /// the `candidate_hash` (and treated with the lowest priority, as already mentioned). 
candidate_hash: CandidateHash, } @@ -287,33 +259,35 @@ impl CandidateComparator { /// /// Useful for testing. #[cfg(test)] - pub fn new_dummy(block_number: BlockNumber, candidate_hash: CandidateHash) -> Self { + pub fn new_dummy(block_number: Option, candidate_hash: CandidateHash) -> Self { Self { relay_parent_block_number: block_number, candidate_hash } } /// Create a candidate comparator for a given candidate. /// /// Returns: - /// `Ok(None)` in case we could not lookup the candidate's relay parent, returns a - /// `FatalError` in case the chain API call fails with an unexpected error. + /// - `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the + /// relay parent can be obtained. This is the happy case. + /// - `Ok(CandidateComparator{None, candidate_hash})` in case the candidate's relay parent + /// can't be obtained. + /// - `FatalError` in case the chain API call fails with an unexpected error. pub async fn new( sender: &mut impl overseer::DisputeCoordinatorSenderTrait, candidate: &CandidateReceipt, - ) -> FatalResult> { + ) -> FatalResult { let candidate_hash = candidate.hash(); - let n = match get_block_number(sender, candidate.descriptor().relay_parent).await? { - None => { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - "Candidate's relay_parent could not be found via chain API - `CandidateComparator could not be provided!" - ); - return Ok(None) - }, - Some(n) => n, - }; + let n = get_block_number(sender, candidate.descriptor().relay_parent).await?; + + if n.is_none() { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + "Candidate's relay_parent could not be found via chain API - `CandidateComparator` \ + with an empty relay parent block number will be provided!" + ); + } - Ok(Some(CandidateComparator { relay_parent_block_number: n, candidate_hash })) + Ok(CandidateComparator { relay_parent_block_number: n, candidate_hash }) } } @@ -333,11 +307,28 @@ impl PartialOrd for CandidateComparator { impl Ord for CandidateComparator { fn cmp(&self, other: &Self) -> Ordering { - match self.relay_parent_block_number.cmp(&other.relay_parent_block_number) { - Ordering::Equal => (), - o => return o, + return match (self.relay_parent_block_number, other.relay_parent_block_number) { + (None, None) => { + // No relay parents for both -> compare hashes + self.candidate_hash.cmp(&other.candidate_hash) + }, + (Some(self_relay_parent_block_num), Some(other_relay_parent_block_num)) => { + match self_relay_parent_block_num.cmp(&other_relay_parent_block_num) { + // if the relay parent is the same for both -> compare hashes + Ordering::Equal => self.candidate_hash.cmp(&other.candidate_hash), + // if not - return the result from comparing the relay parent block numbers + o => return o, + } + }, + (Some(_), None) => { + // Candidates with known relay parents are always with priority + Ordering::Less + }, + (None, Some(_)) => { + // Ditto + Ordering::Greater + }, } - self.candidate_hash.cmp(&other.candidate_hash) } } diff --git a/node/core/dispute-coordinator/src/participation/queues/tests.rs b/node/core/dispute-coordinator/src/participation/queues/tests.rs index 4e9019ebb499..b6af4bd2b55a 100644 --- a/node/core/dispute-coordinator/src/participation/queues/tests.rs +++ b/node/core/dispute-coordinator/src/participation/queues/tests.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
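
With both participation queues now backed by a `BTreeMap`, ordering comes entirely from the comparator shown above: older relay-parent block numbers first, ties broken by candidate hash, and `None` (unknown relay parent) sorted last. The following stand-alone sketch reproduces that ordering with placeholder types; the real `CandidateComparator` and `Queues` are the ones in the hunk above, not this code.

use std::cmp::Ordering;
use std::collections::BTreeMap;

type BlockNumber = u32;
type CandidateHash = [u8; 32];

// Placeholder comparator mirroring the ordering rules in the patch.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Comparator {
    relay_parent_block_number: Option<BlockNumber>,
    candidate_hash: CandidateHash,
}

impl Ord for Comparator {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self.relay_parent_block_number, other.relay_parent_block_number) {
            // Unknown relay parents sort last, i.e. lowest priority.
            (None, None) => self.candidate_hash.cmp(&other.candidate_hash),
            (Some(a), Some(b)) =>
                a.cmp(&b).then_with(|| self.candidate_hash.cmp(&other.candidate_hash)),
            (Some(_), None) => Ordering::Less,
            (None, Some(_)) => Ordering::Greater,
        }
    }
}

impl PartialOrd for Comparator {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    // A BTreeMap keyed by the comparator yields the oldest known dispute first
    // and drains unknown-relay-parent entries only after all known ones.
    let mut queue: BTreeMap<Comparator, &str> = BTreeMap::new();
    queue.insert(Comparator { relay_parent_block_number: None, candidate_hash: [3; 32] }, "unknown parent");
    queue.insert(Comparator { relay_parent_block_number: Some(7), candidate_hash: [2; 32] }, "newer");
    queue.insert(Comparator { relay_parent_block_number: Some(1), candidate_hash: [1; 32] }, "oldest");

    let order: Vec<_> = queue.values().copied().collect();
    assert_eq!(order, vec!["oldest", "newer", "unknown parent"]);
}

Popping the first key of such a map is what `pop_impl` does above, which is why the `added_count`-based linear search of the old best-effort `HashMap` is no longer needed.
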
+use crate::ParticipationPriority; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; use polkadot_primitives::v2::{BlockNumber, Hash}; @@ -31,15 +32,16 @@ fn make_participation_request(hash: Hash) -> ParticipationRequest { /// Make dummy comparator for request, based on the given block number. fn make_dummy_comparator( req: &ParticipationRequest, - relay_parent: BlockNumber, + relay_parent: Option, ) -> CandidateComparator { CandidateComparator::new_dummy(relay_parent, *req.candidate_hash()) } /// Check that dequeuing acknowledges order. /// -/// Any priority item will be dequeued before any best effort items, priority items will be -/// processed in order. Best effort items, based on how often they have been added. +/// Any priority item will be dequeued before any best effort items, priority and best effort with +/// known parent block number items will be processed in order. Best effort items without known parent +/// block number should be treated with lowest priority. #[test] fn ordering_works_as_expected() { let mut queue = Queues::new(); @@ -47,36 +49,69 @@ fn ordering_works_as_expected() { let req_prio = make_participation_request(Hash::repeat_byte(0x02)); let req3 = make_participation_request(Hash::repeat_byte(0x03)); let req_prio_2 = make_participation_request(Hash::repeat_byte(0x04)); - let req5 = make_participation_request(Hash::repeat_byte(0x05)); + let req5_unknown_parent = make_participation_request(Hash::repeat_byte(0x05)); let req_full = make_participation_request(Hash::repeat_byte(0x06)); let req_prio_full = make_participation_request(Hash::repeat_byte(0x07)); - queue.queue_with_comparator(None, req1.clone()).unwrap(); queue - .queue_with_comparator(Some(make_dummy_comparator(&req_prio, 1)), req_prio.clone()) + .queue_with_comparator( + make_dummy_comparator(&req1, Some(1)), + ParticipationPriority::BestEffort, + req1.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio, Some(1)), + ParticipationPriority::Priority, + req_prio.clone(), + ) .unwrap(); - queue.queue_with_comparator(None, req3.clone()).unwrap(); queue - .queue_with_comparator(Some(make_dummy_comparator(&req_prio_2, 2)), req_prio_2.clone()) + .queue_with_comparator( + make_dummy_comparator(&req3, Some(2)), + ParticipationPriority::BestEffort, + req3.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio_2, Some(2)), + ParticipationPriority::Priority, + req_prio_2.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req5_unknown_parent, None), + ParticipationPriority::BestEffort, + req5_unknown_parent.clone(), + ) .unwrap(); - queue.queue_with_comparator(None, req3.clone()).unwrap(); - queue.queue_with_comparator(None, req5.clone()).unwrap(); assert_matches!( - queue.queue_with_comparator(Some(make_dummy_comparator(&req_prio_full, 3)), req_prio_full), + queue.queue_with_comparator( + make_dummy_comparator(&req_prio_full, Some(3)), + ParticipationPriority::Priority, + req_prio_full + ), Err(QueueError::PriorityFull) ); - assert_matches!(queue.queue_with_comparator(None, req_full), Err(QueueError::BestEffortFull)); + assert_matches!( + queue.queue_with_comparator( + make_dummy_comparator(&req_full, Some(3)), + ParticipationPriority::BestEffort, + req_full + ), + Err(QueueError::BestEffortFull) + ); + // Prioritized queue is ordered correctly assert_eq!(queue.dequeue(), Some(req_prio)); assert_eq!(queue.dequeue(), Some(req_prio_2)); + // So is the best-effort 
+ assert_eq!(queue.dequeue(), Some(req1)); assert_eq!(queue.dequeue(), Some(req3)); - assert_matches!( - queue.dequeue(), - Some(r) => { assert!(r == req1 || r == req5) } - ); - assert_matches!( - queue.dequeue(), - Some(r) => { assert!(r == req1 || r == req5) } - ); + assert_eq!(queue.dequeue(), Some(req5_unknown_parent)); + assert_matches!(queue.dequeue(), None); } @@ -89,23 +124,50 @@ fn candidate_is_only_dequeued_once() { let req_best_effort_then_prio = make_participation_request(Hash::repeat_byte(0x03)); let req_prio_then_best_effort = make_participation_request(Hash::repeat_byte(0x04)); - queue.queue_with_comparator(None, req1.clone()).unwrap(); queue - .queue_with_comparator(Some(make_dummy_comparator(&req_prio, 1)), req_prio.clone()) + .queue_with_comparator( + make_dummy_comparator(&req1, None), + ParticipationPriority::BestEffort, + req1.clone(), + ) + .unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio, Some(1)), + ParticipationPriority::Priority, + req_prio.clone(), + ) .unwrap(); // Insert same best effort again: - queue.queue_with_comparator(None, req1.clone()).unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req1, None), + ParticipationPriority::BestEffort, + req1.clone(), + ) + .unwrap(); // insert same prio again: queue - .queue_with_comparator(Some(make_dummy_comparator(&req_prio, 1)), req_prio.clone()) + .queue_with_comparator( + make_dummy_comparator(&req_prio, Some(1)), + ParticipationPriority::Priority, + req_prio.clone(), + ) .unwrap(); // Insert first as best effort: - queue.queue_with_comparator(None, req_best_effort_then_prio.clone()).unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_best_effort_then_prio, Some(2)), + ParticipationPriority::BestEffort, + req_best_effort_then_prio.clone(), + ) + .unwrap(); // Then as prio: queue .queue_with_comparator( - Some(make_dummy_comparator(&req_best_effort_then_prio, 2)), + make_dummy_comparator(&req_best_effort_then_prio, Some(2)), + ParticipationPriority::Priority, req_best_effort_then_prio.clone(), ) .unwrap(); @@ -116,12 +178,19 @@ fn candidate_is_only_dequeued_once() { // Insert first as prio: queue .queue_with_comparator( - Some(make_dummy_comparator(&req_prio_then_best_effort, 3)), + make_dummy_comparator(&req_prio_then_best_effort, Some(3)), + ParticipationPriority::Priority, req_prio_then_best_effort.clone(), ) .unwrap(); // Then as best effort: - queue.queue_with_comparator(None, req_prio_then_best_effort.clone()).unwrap(); + queue + .queue_with_comparator( + make_dummy_comparator(&req_prio_then_best_effort, Some(3)), + ParticipationPriority::BestEffort, + req_prio_then_best_effort.clone(), + ) + .unwrap(); assert_eq!(queue.dequeue(), Some(req_best_effort_then_prio)); assert_eq!(queue.dequeue(), Some(req_prio_then_best_effort)); diff --git a/node/core/dispute-coordinator/src/participation/tests.rs b/node/core/dispute-coordinator/src/participation/tests.rs index 03772b1918dc..bf149a87286f 100644 --- a/node/core/dispute-coordinator/src/participation/tests.rs +++ b/node/core/dispute-coordinator/src/participation/tests.rs @@ -29,7 +29,10 @@ use parity_scale_codec::Encode; use polkadot_node_primitives::{AvailableData, BlockData, InvalidCandidate, PoV}; use polkadot_node_subsystem::{ jaeger, - messages::{AllMessages, DisputeCoordinatorMessage, RuntimeApiMessage, RuntimeApiRequest}, + messages::{ + AllMessages, ChainApiMessage, DisputeCoordinatorMessage, RuntimeApiMessage, + RuntimeApiRequest, + }, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, 
SpawnGlue, }; use polkadot_node_subsystem_test_helpers::{ @@ -221,9 +224,9 @@ fn same_req_wont_get_queued_if_participation_is_already_running() { #[test] fn reqs_get_queued_when_out_of_capacity() { - futures::executor::block_on(async { - let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + let test = async { let (sender, mut worker_receiver) = mpsc::channel(1); let mut participation = Participation::new(sender); activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); @@ -239,43 +242,81 @@ fn reqs_get_queued_when_out_of_capacity() { } for _ in 0..MAX_PARALLEL_PARTICIPATIONS + 1 { - assert_matches!( - ctx_handle.recv().await, - AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) - ) => { - tx.send(Err(RecoveryError::Unavailable)).unwrap(); - }, - "overseer did not receive recover available data message", - ); - let result = participation .get_participation_result(&mut ctx, worker_receiver.next().await.unwrap()) .await .unwrap(); - assert_matches!( result.outcome, ParticipationOutcome::Unavailable => {} ); } + // we should not have any further recovery requests: + assert_matches!(worker_receiver.next().timeout(Duration::from_millis(10)).await, None); + }; + + let request_handler = async { + let mut recover_available_data_msg_count = 0; + let mut block_number_msg_count = 0; + + while recover_available_data_msg_count < MAX_PARALLEL_PARTICIPATIONS + 1 || + block_number_msg_count < 1 + { + match ctx_handle.recv().await { + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx), + ) => { + tx.send(Err(RecoveryError::Unavailable)).unwrap(); + recover_available_data_msg_count += 1; + }, + AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => { + tx.send(Ok(None)).unwrap(); + block_number_msg_count += 1; + }, + _ => assert!(false, "Received unexpected message"), + } + } - // we should not have any further results nor recovery requests: + // we should not have any further results assert_matches!(ctx_handle.recv().timeout(Duration::from_millis(10)).await, None); - assert_matches!(worker_receiver.next().timeout(Duration::from_millis(10)).await, None); - }) + }; + + futures::executor::block_on(async { + futures::join!(test, request_handler); + }); } #[test] fn reqs_get_queued_on_no_recent_block() { - futures::executor::block_on(async { - let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); - + let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new()); + let (mut unblock_test, mut wait_for_verification) = mpsc::channel(0); + let test = async { let (sender, _worker_receiver) = mpsc::channel(1); let mut participation = Participation::new(sender); participate(&mut ctx, &mut participation).await.unwrap(); - assert!(ctx_handle.recv().timeout(Duration::from_millis(10)).await.is_none()); + + // We have initiated participation but we'll block `active_leaf` so that we can check that + // the participation is queued in race-free way + let _ = wait_for_verification.next().await.unwrap(); + activate_leaf(&mut ctx, &mut participation, 10).await.unwrap(); + }; + + // Responds to messages from the test and verifies its behaviour + let request_handler = async { + // If we receive `BlockNumber` request this implicitly proves that the participation is queued + assert_matches!( + ctx_handle.recv().await, + 
AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => { + tx.send(Ok(None)).unwrap(); + }, + "overseer did not receive `ChainApiMessage::BlockNumber` message", + ); + + assert!(ctx_handle.recv().timeout(Duration::from_millis(10)).await.is_none()); + + // No activity so the participation is queued => unblock the test + unblock_test.send(()).await.unwrap(); // after activating at least one leaf the recent block // state should be available which should lead to trying @@ -288,7 +329,11 @@ fn reqs_get_queued_on_no_recent_block() { )), "overseer did not receive recover available data message", ); - }) + }; + + futures::executor::block_on(async { + futures::join!(test, request_handler); + }); } #[test] diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index d7208c1a59eb..4d32620946e0 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -31,7 +31,9 @@ use futures::{ use polkadot_node_subsystem_util::database::Database; -use polkadot_node_primitives::{SignedDisputeStatement, SignedFullStatement, Statement}; +use polkadot_node_primitives::{ + DisputeStatus, SignedDisputeStatement, SignedFullStatement, Statement, +}; use polkadot_node_subsystem::{ messages::{ ApprovalVotingMessage, ChainApiMessage, DisputeCoordinatorMessage, @@ -55,7 +57,9 @@ use polkadot_node_subsystem::{ messages::{AllMessages, BlockDescription, RuntimeApiMessage, RuntimeApiRequest}, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, }; -use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle}; +use polkadot_node_subsystem_test_helpers::{ + make_buffered_subsystem_context, TestSubsystemContextHandle, +}; use polkadot_primitives::v2::{ ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CandidateReceipt, CoreIndex, DisputeStatement, GroupIndex, Hash, HeadData, Header, IndexedVec, @@ -380,6 +384,10 @@ impl TestState { ); gum::trace!("After answering runtime API request (votes)"); }, + AllMessages::ChainApi(ChainApiMessage::BlockNumber(hash, tx)) => { + let block_num = self.headers.get(&hash).map(|header| header.number); + tx.send(Ok(block_num)).unwrap(); + }, msg => { panic!("Received unexpected message in `handle_sync_queries`: {:?}", msg); }, @@ -519,7 +527,7 @@ impl TestState { F: FnOnce(TestState, VirtualOverseer) -> BoxFuture<'static, TestState>, { self.known_session = None; - let (ctx, ctx_handle) = make_subsystem_context(TaskExecutor::new()); + let (ctx, ctx_handle) = make_buffered_subsystem_context(TaskExecutor::new(), 1); let subsystem = DisputeCoordinatorSubsystem::new( self.db.clone(), self.config.clone(), @@ -682,7 +690,10 @@ fn too_many_unconfirmed_statements_are_considered_spam() { }) .await; - assert_eq!(rx.await.unwrap(), vec![(session, candidate_hash1)]); + assert_eq!( + rx.await.unwrap(), + vec![(session, candidate_hash1, DisputeStatus::Active)] + ); let (tx, rx) = oneshot::channel(); virtual_overseer @@ -812,7 +823,10 @@ fn approval_vote_import_works() { }) .await; - assert_eq!(rx.await.unwrap(), vec![(session, candidate_hash1)]); + assert_eq!( + rx.await.unwrap(), + vec![(session, candidate_hash1, DisputeStatus::Active)] + ); let (tx, rx) = oneshot::channel(); virtual_overseer @@ -934,7 +948,10 @@ fn dispute_gets_confirmed_via_participation() { }) .await; - assert_eq!(rx.await.unwrap(), vec![(session, candidate_hash1)]); + assert_eq!( + rx.await.unwrap(), + vec![(session, candidate_hash1, DisputeStatus::Active)] + ); let (tx, rx) = 
oneshot::channel(); virtual_overseer @@ -1099,7 +1116,10 @@ fn dispute_gets_confirmed_at_byzantine_threshold() { }) .await; - assert_eq!(rx.await.unwrap(), vec![(session, candidate_hash1)]); + assert_eq!( + rx.await.unwrap(), + vec![(session, candidate_hash1, DisputeStatus::Confirmed)] + ); let (tx, rx) = oneshot::channel(); virtual_overseer @@ -1358,7 +1378,10 @@ fn conflicting_votes_lead_to_dispute_participation() { }) .await; - assert_eq!(rx.await.unwrap(), vec![(session, candidate_hash)]); + assert_eq!( + rx.await.unwrap(), + vec![(session, candidate_hash, DisputeStatus::Active)] + ); let (tx, rx) = oneshot::channel(); virtual_overseer @@ -2821,6 +2844,12 @@ fn negative_issue_local_statement_only_triggers_import() { }) .await; + // Assert that subsystem is not participating. + assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + assert!(virtual_overseer.try_recv().await.is_none()); + let backend = DbBackend::new( test_state.db.clone(), test_state.config.column_config(), @@ -2834,12 +2863,6 @@ fn negative_issue_local_statement_only_triggers_import() { let disputes = backend.load_recent_disputes().unwrap(); assert_eq!(disputes, None); - // Assert that subsystem is not participating. - assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); - - virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; - assert!(virtual_overseer.try_recv().await.is_none()); - test_state }) }); @@ -2917,6 +2940,68 @@ fn redundant_votes_ignored() { }); } +#[test] +/// Make sure no disputes are recorded when there are no opposing votes, even if we reached supermajority. +fn no_onesided_disputes() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + let mut statements = Vec::new(); + for index in 1..10 { + statements.push(( + test_state + .issue_backing_statement_with_index( + ValidatorIndex(index), + candidate_hash, + session, + ) + .await, + ValidatorIndex(index), + )); + } + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements, + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); + + // We should not have any active disputes now. 
+ let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // No more messages expected: + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + #[test] fn refrain_from_participation() { test_harness(|mut test_state, mut virtual_overseer| { diff --git a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index 07426ef1a75b..dcfcd0d1c2f0 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -139,6 +139,13 @@ where onchain.len(), ); + // Filter out unconfirmed disputes. However if the dispute is already onchain - don't skip it. + // In this case we'd better push as much fresh votes as possible to bring it to conclusion faster. + let recent_disputes = recent_disputes + .into_iter() + .filter(|d| d.2.is_confirmed_concluded() || onchain.contains_key(&(d.0, d.1))) + .collect::>(); + let partitioned = partition_recent_disputes(recent_disputes, &onchain); metrics.on_partition_recent_disputes(&partitioned); diff --git a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs index f76107dc65d4..1f8d4f180263 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs @@ -426,7 +426,7 @@ impl TestDisputes { pub fn add_unconfirmed_disputes_concluded_onchain( &mut self, dispute_count: usize, - ) -> (u32, usize) { + ) -> (SessionIndex, usize) { let local_votes_count = self.validators_count * 90 / 100; let onchain_votes_count = self.validators_count * 80 / 100; let session_idx = 0; @@ -444,7 +444,7 @@ impl TestDisputes { pub fn add_unconfirmed_disputes_unconcluded_onchain( &mut self, dispute_count: usize, - ) -> (u32, usize) { + ) -> (SessionIndex, usize) { let local_votes_count = self.validators_count * 90 / 100; let onchain_votes_count = self.validators_count * 40 / 100; let session_idx = 1; @@ -459,22 +459,25 @@ impl TestDisputes { (session_idx, (local_votes_count - onchain_votes_count) * dispute_count) } - pub fn add_unconfirmed_disputes_unknown_onchain( + pub fn add_confirmed_disputes_unknown_onchain( &mut self, dispute_count: usize, - ) -> (u32, usize) { + ) -> (SessionIndex, usize) { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 2; let lf = leaf(); let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); for _ in 0..dispute_count { - let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); + let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Confirmed); self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); } (session_idx, local_votes_count * dispute_count) } - pub fn add_concluded_disputes_known_onchain(&mut self, dispute_count: usize) -> (u32, usize) { + pub fn add_concluded_disputes_known_onchain( + &mut self, + dispute_count: usize, + ) -> (SessionIndex, usize) { let local_votes_count = self.validators_count * 90 / 100; let onchain_votes_count = self.validators_count * 75 / 100; let session_idx = 3; @@ -488,7 +491,10 @@ impl TestDisputes { (session_idx, (local_votes_count - 
onchain_votes_count) * dispute_count) } - pub fn add_concluded_disputes_unknown_onchain(&mut self, dispute_count: usize) -> (u32, usize) { + pub fn add_concluded_disputes_unknown_onchain( + &mut self, + dispute_count: usize, + ) -> (SessionIndex, usize) { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 4; let lf = leaf(); @@ -500,6 +506,40 @@ impl TestDisputes { (session_idx, local_votes_count * dispute_count) } + pub fn add_unconfirmed_disputes_known_onchain( + &mut self, + dispute_count: usize, + ) -> (SessionIndex, usize) { + let local_votes_count = self.validators_count * 10 / 100; + let onchain_votes_count = self.validators_count * 10 / 100; + let session_idx = 5; + let lf = leaf(); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + for _ in 0..dispute_count { + let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); + self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_onchain_dispute(d, onchain_votes_count); + } + + (session_idx, (local_votes_count - onchain_votes_count) * dispute_count) + } + + pub fn add_unconfirmed_disputes_unknown_onchain( + &mut self, + dispute_count: usize, + ) -> (SessionIndex, usize) { + let local_votes_count = self.validators_count * 10 / 100; + let session_idx = 6; + let lf = leaf(); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + for _ in 0..dispute_count { + let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); + self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + } + + (session_idx, local_votes_count * dispute_count) + } + fn generate_local_votes( statement_kind: T, start_idx: usize, @@ -554,9 +594,9 @@ fn normal_flow() { // concluded disputes known onchain - these should be ignored let (_, _) = input.add_concluded_disputes_known_onchain(DISPUTES_PER_BATCH); - // active disputes unknown onchain + // confirmed disputes unknown onchain let (second_idx, second_votes) = - input.add_unconfirmed_disputes_unknown_onchain(DISPUTES_PER_BATCH); + input.add_confirmed_disputes_unknown_onchain(DISPUTES_PER_BATCH); let metrics = metrics::Metrics::new_dummy(); let mut vote_queries: usize = 0; @@ -635,8 +675,8 @@ fn many_batches() { // concluded disputes known onchain input.add_concluded_disputes_known_onchain(DISPUTES_PER_PARTITION); - // active disputes unknown onchain - input.add_unconfirmed_disputes_unknown_onchain(DISPUTES_PER_PARTITION); + // confirmed disputes unknown onchain + input.add_confirmed_disputes_unknown_onchain(DISPUTES_PER_PARTITION); let metrics = metrics::Metrics::new_dummy(); let mut vote_queries: usize = 0; @@ -720,3 +760,30 @@ fn votes_above_limit() { ACCEPTABLE_RUNTIME_VOTES_QUERIES_COUNT ); } + +#[test] +fn unconfirmed_are_handled_correctly() { + const VALIDATOR_COUNT: usize = 10; + const DISPUTES_PER_PARTITION: usize = 50; + + let mut input = TestDisputes::new(VALIDATOR_COUNT); + + // Add unconfirmed known onchain -> this should be pushed + let (pushed_idx, _) = input.add_unconfirmed_disputes_known_onchain(DISPUTES_PER_PARTITION); + + // Add unconfirmed unknown onchain -> this should be ignored + input.add_unconfirmed_disputes_unknown_onchain(DISPUTES_PER_PARTITION); + + let metrics = metrics::Metrics::new_dummy(); + let mut vote_queries: usize = 0; + test_harness( + |r| mock_overseer(r, &mut input, &mut vote_queries), + |mut tx: TestSubsystemSender| async move { + let lf = leaf(); + let result = select_disputes(&mut tx, &metrics, &lf).await; + 
+ assert!(result.len() == DISPUTES_PER_PARTITION); + result.iter().for_each(|d| assert!(d.session == pushed_idx)); + }, + ); +} diff --git a/node/core/provisioner/src/disputes/random_selection/mod.rs b/node/core/provisioner/src/disputes/random_selection/mod.rs index a25d3445ac6e..9a827475aa55 100644 --- a/node/core/provisioner/src/disputes/random_selection/mod.rs +++ b/node/core/provisioner/src/disputes/random_selection/mod.rs @@ -42,51 +42,35 @@ enum RequestType { } /// Request open disputes identified by `CandidateHash` and the `SessionIndex`. -async fn request_disputes( +/// Returns only confirmed/concluded disputes. The rest are filtered out. +async fn request_confirmed_disputes( sender: &mut impl overseer::ProvisionerSenderTrait, active_or_recent: RequestType, ) -> Vec<(SessionIndex, CandidateHash)> { - let disputes = match active_or_recent { - RequestType::Recent => { - let (tx, rx) = oneshot::channel(); - let msg = DisputeCoordinatorMessage::RecentDisputes(tx); - sender.send_unbounded_message(msg); - let recent_disputes = match rx.await { - Ok(r) => r, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Channel closed: unable to gather {:?} disputes", - active_or_recent - ); - Vec::new() - }, - }; - recent_disputes - .into_iter() - .map(|(sesion_idx, candodate_hash, _)| (sesion_idx, candodate_hash)) - .collect::>() - }, - RequestType::Active => { - let (tx, rx) = oneshot::channel(); - let msg = DisputeCoordinatorMessage::ActiveDisputes(tx); - sender.send_unbounded_message(msg); - let active_disputes = match rx.await { - Ok(r) => r, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Unable to gather {:?} disputes", - active_or_recent - ); - Vec::new() - }, - }; - active_disputes + let (tx, rx) = oneshot::channel(); + let msg = match active_or_recent { + RequestType::Recent => DisputeCoordinatorMessage::RecentDisputes(tx), + RequestType::Active => DisputeCoordinatorMessage::ActiveDisputes(tx), + }; + + sender.send_unbounded_message(msg); + let disputes = match rx.await { + Ok(r) => r, + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Channel closed: unable to gather {:?} disputes", + active_or_recent + ); + Vec::new() }, }; disputes + .into_iter() + .filter(|d| d.2.is_confirmed_concluded()) + .map(|d| (d.0, d.1)) + .collect() } /// Extend `acc` by `n` random, picks of not-yet-present in `acc` items of `recent` without repetition and additions of recent. @@ -132,7 +116,7 @@ where // In case of an overload condition, we limit ourselves to active disputes, and fill up to the // upper bound of disputes to pass to wasm `fn create_inherent_data`. // If the active ones are already exceeding the bounds, randomly select a subset. 
- let recent = request_disputes(sender, RequestType::Recent).await; + let recent = request_confirmed_disputes(sender, RequestType::Recent).await; let disputes = if recent.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME { gum::warn!( target: LOG_TARGET, @@ -140,7 +124,7 @@ where recent.len(), MAX_DISPUTES_FORWARDED_TO_RUNTIME ); - let mut active = request_disputes(sender, RequestType::Active).await; + let mut active = request_confirmed_disputes(sender, RequestType::Active).await; let n_active = active.len(); let active = if active.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME { let mut picked = Vec::with_capacity(MAX_DISPUTES_FORWARDED_TO_RUNTIME); diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index bd343ded1964..ce32f9a943f4 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -188,9 +188,8 @@ async fn handle_active_leaves_update( per_relay_parent.remove(deactivated); } - for leaf in update.activated { + if let Some(leaf) = update.activated { let prospective_parachains_mode = prospective_parachains_mode(sender, leaf.hash).await?; - let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, prospective_parachains_mode)); inherent_delays.push(delay_fut); diff --git a/node/core/pvf/src/executor_intf.rs b/node/core/pvf/src/executor_intf.rs index 6827fb636eac..bbeb6195e1dc 100644 --- a/node/core/pvf/src/executor_intf.rs +++ b/node/core/pvf/src/executor_intf.rs @@ -96,7 +96,7 @@ pub fn prevalidate(code: &[u8]) -> Result Result, sc_executor_common::error::WasmError> { sc_executor_wasmtime::prepare_runtime_artifact(blob, &CONFIG.semantics) } diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index ef5f31889237..1aabb1100437 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -16,18 +16,27 @@ #![warn(missing_docs)] -//! A crate that implements PVF validation host. +//! A crate that implements the PVF validation host. +//! +//! For more background, refer to the Implementer's Guide: [PVF +//! Pre-checking](https://paritytech.github.io/polkadot/book/pvf-prechecking.html) and [Candidate +//! Validation](https://paritytech.github.io/polkadot/book/node/utility/candidate-validation.html#pvf-host). +//! +//! # Entrypoint //! //! This crate provides a simple API. You first [`start`] the validation host, which gives you the //! [handle][`ValidationHost`] and the future you need to poll. //! -//! Then using the handle the client can send two types of requests: +//! Then using the handle the client can send three types of requests: +//! +//! (a) PVF pre-checking. This takes the PVF [code][`Pvf`] and tries to prepare it (verify and +//! compile) in order to pre-check its validity. //! -//! (a) PVF execution. This accepts the PVF [`params`][`polkadot_parachain::primitives::ValidationParams`] +//! (b) PVF execution. This accepts the PVF [`params`][`polkadot_parachain::primitives::ValidationParams`] //! and the PVF [code][`Pvf`], prepares (verifies and compiles) the code, and then executes PVF //! with the `params`. //! -//! (b) Heads up. This request allows to signal that the given PVF may be needed soon and that it +//! (c) Heads up. This request allows to signal that the given PVF may be needed soon and that it //! should be prepared for execution. //! //! The preparation results are cached for some time after they either used or was signaled in heads up. @@ -39,7 +48,7 @@ //! 
PVF execution requests can specify the [priority][`Priority`] with which the given request should //! be handled. Different priority levels have different effects. This is discussed below. //! -//! Preparation started by a heads up signal always starts in with the background priority. If there +//! Preparation started by a heads up signal always starts with the background priority. If there //! is already a request for that PVF preparation under way the priority is inherited. If after heads //! up, a new PVF execution request comes in with a higher priority, then the original task's priority //! will be adjusted to match the new one if it's larger. @@ -48,6 +57,8 @@ //! //! # Under the hood //! +//! ## The flow +//! //! Under the hood, the validation host is built using a bunch of communicating processes, not //! dissimilar to actors. Each of such "processes" is a future task that contains an event loop that //! processes incoming messages, potentially delegating sub-tasks to other "processes". @@ -55,11 +66,13 @@ //! Two of these processes are queues. The first one is for preparation jobs and the second one is for //! execution. Both of the queues are backed by separate pools of workers of different kind. //! -//! Preparation workers handle preparation requests by preverifying and instrumenting PVF wasm code, +//! Preparation workers handle preparation requests by prevalidating and instrumenting PVF wasm code, //! and then passing it into the compiler, to prepare the artifact. //! -//! Artifact is a final product of preparation. If the preparation succeeded, then the artifact will -//! contain the compiled code usable for quick execution by a worker later on. +//! ## Artifacts +//! +//! An artifact is the final product of preparation. If the preparation succeeded, then the artifact +//! will contain the compiled code usable for quick execution by a worker later on. //! //! If the preparation failed, then the worker will still write the artifact with the error message. //! We save the artifact with the error so that we don't try to prepare the artifacts that are broken @@ -68,12 +81,14 @@ //! The artifact is saved on disk and is also tracked by an in memory table. This in memory table //! doesn't contain the artifact contents though, only a flag that the given artifact is compiled. //! +//! A pruning task will run at a fixed interval of time. This task will remove all artifacts that +//! weren't used or received a heads up signal for a while. +//! +//! ## Execution +//! //! The execute workers will be fed by the requests from the execution queue, which is basically a //! combination of a path to the compiled artifact and the //! [`params`][`polkadot_parachain::primitives::ValidationParams`]. -//! -//! Each fixed interval of time a pruning task will run. This task will remove all artifacts that -//! weren't used or received a heads up signal for a while. 
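The priority-inheritance rule described in the crate documentation above (a heads-up preparation starts at background priority and later requests only ever raise it) can be illustrated with a minimal sketch. The `JobPriority` and `PrepJob` types below are hypothetical stand-ins for illustration, not the crate's actual queue types:

```rust
// Illustrative sketch only: hypothetical types showing the priority-bumping rule
// described above (a queued preparation inherits the highest priority requested
// for it so far, and is never downgraded).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum JobPriority {
    /// Used for "heads up" preparations.
    Background,
    Normal,
    Critical,
}

struct PrepJob {
    priority: JobPriority,
}

impl PrepJob {
    /// A job started from a heads-up signal always begins at background priority.
    fn from_heads_up() -> Self {
        PrepJob { priority: JobPriority::Background }
    }

    /// A new request for the same PVF arrived: only ever raise the priority.
    fn bump(&mut self, incoming: JobPriority) {
        if incoming > self.priority {
            self.priority = incoming;
        }
    }
}

fn main() {
    let mut job = PrepJob::from_heads_up();
    job.bump(JobPriority::Critical); // e.g. an execution request with higher priority comes in
    assert_eq!(job.priority, JobPriority::Critical);
    job.bump(JobPriority::Normal); // lower priorities never downgrade the job
    assert_eq!(job.priority, JobPriority::Critical);
}
```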
mod artifacts; mod error; diff --git a/node/core/pvf/src/metrics.rs b/node/core/pvf/src/metrics.rs index 20965ec7dbd7..8db105d895ea 100644 --- a/node/core/pvf/src/metrics.rs +++ b/node/core/pvf/src/metrics.rs @@ -183,6 +183,9 @@ impl metrics::Metrics for Metrics { ).buckets(vec![ // This is synchronized with `APPROVAL_EXECUTION_TIMEOUT` and // `BACKING_EXECUTION_TIMEOUT` constants in `node/primitives/src/lib.rs` + 0.01, + 0.025, + 0.05, 0.1, 0.25, 0.5, @@ -192,6 +195,9 @@ impl metrics::Metrics for Metrics { 4.0, 5.0, 6.0, + 8.0, + 10.0, + 12.0, ]), )?, registry, diff --git a/node/core/pvf/src/priority.rs b/node/core/pvf/src/priority.rs index de169be0696b..b80c9195832a 100644 --- a/node/core/pvf/src/priority.rs +++ b/node/core/pvf/src/priority.rs @@ -24,7 +24,7 @@ pub enum Priority { Normal, /// This priority is used for requests that are required to be processed as soon as possible. /// - /// For example, backing is on critical path and require execution as soon as possible. + /// For example, backing is on a critical path and requires execution as soon as possible. Critical, } diff --git a/node/network/approval-distribution/src/metrics.rs b/node/network/approval-distribution/src/metrics.rs index c0887b25f7f4..b14e54c57403 100644 --- a/node/network/approval-distribution/src/metrics.rs +++ b/node/network/approval-distribution/src/metrics.rs @@ -127,7 +127,7 @@ impl MetricsTrait for Metrics { prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( "polkadot_parachain_time_unify_with_peer", "Time spent within fn `unify_with_peer`.", - ))?, + ).buckets(vec![0.000625, 0.00125,0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,]))?, registry, )?, time_import_pending_now_known: prometheus::register( diff --git a/node/network/availability-distribution/src/tests/state.rs b/node/network/availability-distribution/src/tests/state.rs index c021f1bfb81b..be77aa2d023c 100644 --- a/node/network/availability-distribution/src/tests/state.rs +++ b/node/network/availability-distribution/src/tests/state.rs @@ -51,7 +51,7 @@ use polkadot_primitives::v2::{ CandidateHash, CoreState, GroupIndex, Hash, Id as ParaId, ScheduledCore, SessionInfo, ValidatorIndex, }; -use test_helpers::{mock::make_ferdie_keystore, SingleItemSink}; +use test_helpers::mock::make_ferdie_keystore; use super::mock::{make_session_info, OccupiedCoreBuilder}; use crate::LOG_TARGET; @@ -295,7 +295,7 @@ impl TestState { } async fn overseer_signal( - mut tx: SingleItemSink>, + mut tx: mpsc::Sender>, msg: impl Into, ) { let msg = msg.into(); diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index 4ca28c955f9e..5a2905082379 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -836,7 +836,7 @@ async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemRe OverseerSignal::Conclude => Ok(true), OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. 
}) => { // if activated is non-empty, set state.live_block to the highest block in `activated` - for activated in activated { + if let Some(activated) = activated { if activated.number > state.live_block.0 { state.live_block = (activated.number, activated.hash) } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 8c34feed8eb1..e1df82ca2e47 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -274,7 +274,7 @@ impl BitfieldDistribution { })) => { let _timer = self.metrics.time_active_leaves_update(); - for activated in activated { + if let Some(activated) = activated { let relay_parent = activated.hash; gum::trace!(target: LOG_TARGET, ?relay_parent, "activated"); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 71bf70f0541c..8d2290ca3495 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -126,7 +126,7 @@ where let future = run_network_in(self, ctx, network_stream) .map_err(|e| SubsystemError::with_origin("network-bridge", e)) .boxed(); - SpawnedSubsystem { name: "network-bridge-subsystem", future } + SpawnedSubsystem { name: "network-bridge-rx-subsystem", future } } } @@ -619,7 +619,7 @@ where num_deactivated = %deactivated.len(), ); - for activated in activated { + if let Some(activated) = activated { let pos = live_heads .binary_search_by(|probe| probe.number.cmp(&activated.number).reverse()) .unwrap_or_else(|i| i); diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index b3b39f0f7f0e..5f5d579d70e5 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -90,7 +90,7 @@ where let future = run_network_out(self, ctx) .map_err(|e| SubsystemError::with_origin("network-bridge", e)) .boxed(); - SpawnedSubsystem { name: "network-bridge-subsystem", future } + SpawnedSubsystem { name: "network-bridge-tx-subsystem", future } } } diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs index 158c66e20655..9030fc0b3f96 100644 --- a/node/network/dispute-distribution/src/receiver/mod.rs +++ b/node/network/dispute-distribution/src/receiver/mod.rs @@ -302,6 +302,12 @@ where // Queue request: if let Err((authority_id, req)) = self.peer_queues.push_req(authority_id, req) { + gum::debug!( + target: LOG_TARGET, + ?authority_id, + ?peer, + "Peer hit the rate limit - dropping message." 
+ ); req.send_outgoing_response(OutgoingResponse { result: Err(()), reputation_changes: vec![COST_APPARENT_FLOOD], diff --git a/node/network/dispute-distribution/src/sender/mod.rs b/node/network/dispute-distribution/src/sender/mod.rs index 331fe12c61e2..b25561df5652 100644 --- a/node/network/dispute-distribution/src/sender/mod.rs +++ b/node/network/dispute-distribution/src/sender/mod.rs @@ -108,8 +108,6 @@ impl DisputeSender { runtime: &mut RuntimeInfo, msg: DisputeMessage, ) -> Result<()> { - self.rate_limit.limit().await; - let req: DisputeRequest = msg.into(); let candidate_hash = req.0.candidate_receipt.hash(); match self.disputes.entry(candidate_hash) { @@ -118,6 +116,8 @@ impl DisputeSender { return Ok(()) }, Entry::Vacant(vacant) => { + self.rate_limit.limit("in start_sender", candidate_hash).await; + let send_task = SendTask::new( ctx, runtime, @@ -169,10 +169,12 @@ impl DisputeSender { // Iterates in order of insertion: let mut should_rate_limit = true; - for dispute in self.disputes.values_mut() { + for (candidate_hash, dispute) in self.disputes.iter_mut() { if have_new_sessions || dispute.has_failed_sends() { if should_rate_limit { - self.rate_limit.limit().await; + self.rate_limit + .limit("while going through new sessions/failed sends", *candidate_hash) + .await; } let sends_happened = dispute .refresh_sends(ctx, runtime, &self.active_sessions, &self.metrics) @@ -193,7 +195,7 @@ impl DisputeSender { // recovered at startup will be relatively "old" anyway and we assume that no more than a // third of the validators will go offline at any point in time anyway. for dispute in unknown_disputes { - self.rate_limit.limit().await; + self.rate_limit.limit("while going through unknown disputes", dispute.1).await; self.start_send_for_dispute(ctx, runtime, dispute).await?; } Ok(()) @@ -383,7 +385,9 @@ impl RateLimit { } /// Wait until ready and prepare for next call. - async fn limit(&mut self) { + /// + /// String given as occasion and candidate hash are logged in case the rate limit hit. + async fn limit(&mut self, occasion: &'static str, candidate_hash: CandidateHash) { // Wait for rate limit and add some logging: poll_fn(|cx| { let old_limit = Pin::new(&mut self.limit); @@ -391,6 +395,8 @@ impl RateLimit { Poll::Pending => { gum::debug!( target: LOG_TARGET, + ?occasion, + ?candidate_hash, "Sending rate limit hit, slowing down requests" ); Poll::Pending @@ -430,7 +436,9 @@ async fn get_active_disputes( // Caller scope is in `update_leaves` and this is bounded by fork count. ctx.send_unbounded_message(DisputeCoordinatorMessage::ActiveDisputes(tx)); - rx.await.map_err(|_| JfyiError::AskActiveDisputesCanceled) + rx.await + .map_err(|_| JfyiError::AskActiveDisputesCanceled) + .map(|disputes| disputes.into_iter().map(|d| (d.0, d.1)).collect()) } /// Get all locally available dispute votes for a given dispute. 
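The `occasion` parameter threaded through `RateLimit::limit` in the hunk above exists only so that the debug log emitted on a rate-limit hit can say where in the sender the limit was reached. A simplified, synchronous stand-in (the real limiter is async and additionally logs the candidate hash) might look like this sketch:

```rust
use std::time::{Duration, Instant};

/// Illustrative only: a minimal blocking rate limiter mirroring the idea above.
/// Calls are spaced at least `interval` apart; when a caller has to wait, the
/// `occasion` label records where in the code the limit was hit.
struct RateLimit {
    interval: Duration,
    last: Option<Instant>,
}

impl RateLimit {
    fn new(interval: Duration) -> Self {
        RateLimit { interval, last: None }
    }

    fn limit(&mut self, occasion: &'static str) {
        if let Some(last) = self.last {
            let elapsed = last.elapsed();
            if elapsed < self.interval {
                // In the real subsystem this is a `gum::debug!` log plus an async wait.
                println!("rate limit hit ({occasion}), slowing down");
                std::thread::sleep(self.interval - elapsed);
            }
        }
        self.last = Some(Instant::now());
    }
}

fn main() {
    let mut limiter = RateLimit::new(Duration::from_millis(100));
    limiter.limit("in start_sender");
    limiter.limit("while going through unknown disputes"); // waits ~100ms, then logs
}
```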
diff --git a/node/network/dispute-distribution/src/tests/mod.rs b/node/network/dispute-distribution/src/tests/mod.rs index 56cdd467fd62..d6381239965b 100644 --- a/node/network/dispute-distribution/src/tests/mod.rs +++ b/node/network/dispute-distribution/src/tests/mod.rs @@ -45,7 +45,7 @@ use polkadot_node_network_protocol::{ request_response::{v1::DisputeResponse, Recipient, Requests}, IfDisconnected, }; -use polkadot_node_primitives::{CandidateVotes, UncheckedDisputeMessage}; +use polkadot_node_primitives::{CandidateVotes, DisputeStatus, UncheckedDisputeMessage}; use polkadot_node_subsystem::{ messages::{ AllMessages, DisputeCoordinatorMessage, DisputeDistributionMessage, ImportStatementsResult, @@ -658,7 +658,7 @@ fn dispute_retries_and_works_across_session_boundaries() { Some(old_head), MOCK_SESSION_INDEX, None, - vec![(MOCK_SESSION_INDEX, candidate.hash())], + vec![(MOCK_SESSION_INDEX, candidate.hash(), DisputeStatus::Active)], ) .await; @@ -673,7 +673,7 @@ fn dispute_retries_and_works_across_session_boundaries() { Some(old_head2), MOCK_NEXT_SESSION_INDEX, Some(MOCK_NEXT_SESSION_INFO.clone()), - vec![(MOCK_SESSION_INDEX, candidate.hash())], + vec![(MOCK_SESSION_INDEX, candidate.hash(), DisputeStatus::Active)], ) .await; @@ -832,7 +832,7 @@ async fn activate_leaf( // New session if we expect the subsystem to request it. new_session: Option, // Currently active disputes to send to the subsystem. - active_disputes: Vec<(SessionIndex, CandidateHash)>, + active_disputes: Vec<(SessionIndex, CandidateHash, DisputeStatus)>, ) { let has_active_disputes = !active_disputes.is_empty(); handle @@ -934,7 +934,10 @@ async fn handle_subsystem_startup( None, MOCK_SESSION_INDEX, Some(MOCK_SESSION_INFO.clone()), - ongoing_dispute.into_iter().map(|c| (MOCK_SESSION_INDEX, c)).collect(), + ongoing_dispute + .into_iter() + .map(|c| (MOCK_SESSION_INDEX, c, DisputeStatus::Active)) + .collect(), ) .await; relay_parent diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index 13541f810f91..4a648087eb4b 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -131,6 +131,17 @@ const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; pub const DISPUTE_REQUEST_TIMEOUT: Duration = Duration::from_secs(12); impl Protocol { + /// Get a configuration for a given Request response protocol. + /// + /// Returns a `ProtocolConfig` for this protocol. + /// Use this if you plan only to send requests for this protocol. + pub fn get_outbound_only_config( + self, + req_protocol_names: &ReqProtocolNames, + ) -> RequestResponseConfig { + self.create_config(req_protocol_names, None) + } + /// Get a configuration for a given Request response protocol. 
/// /// Returns a receiver for messages received on this protocol and the requested @@ -139,10 +150,19 @@ impl Protocol { self, req_protocol_names: &ReqProtocolNames, ) -> (mpsc::Receiver, RequestResponseConfig) { + let (tx, rx) = mpsc::channel(self.get_channel_size()); + let cfg = self.create_config(req_protocol_names, Some(tx)); + (rx, cfg) + } + + fn create_config( + self, + req_protocol_names: &ReqProtocolNames, + tx: Option>, + ) -> RequestResponseConfig { let name = req_protocol_names.get_name(self); let fallback_names = self.get_fallback_names(); - let (tx, rx) = mpsc::channel(self.get_channel_size()); - let cfg = match self { + match self { Protocol::ChunkFetchingV1 => RequestResponseConfig { name, fallback_names, @@ -150,7 +170,7 @@ impl Protocol { max_response_size: POV_RESPONSE_SIZE as u64 * 3, // We are connected to all validators: request_timeout: CHUNK_REQUEST_TIMEOUT, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => RequestResponseConfig { @@ -160,7 +180,7 @@ impl Protocol { max_response_size: POV_RESPONSE_SIZE, // Taken from initial implementation in collator protocol: request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::PoVFetchingV1 => RequestResponseConfig { name, @@ -168,7 +188,7 @@ impl Protocol { max_request_size: 1_000, max_response_size: POV_RESPONSE_SIZE, request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::AvailableDataFetchingV1 => RequestResponseConfig { name, @@ -177,7 +197,7 @@ impl Protocol { // Available data size is dominated by the PoV size. max_response_size: POV_RESPONSE_SIZE, request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::StatementFetchingV1 => RequestResponseConfig { name, @@ -195,7 +215,7 @@ impl Protocol { // fail, but this is desired, so we can quickly move on to a faster one - we should // also decrease its reputation. request_timeout: Duration::from_secs(1), - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::DisputeSendingV1 => RequestResponseConfig { name, @@ -205,10 +225,9 @@ impl Protocol { /// plenty. max_response_size: 100, request_timeout: DISPUTE_REQUEST_TIMEOUT, - inbound_queue: Some(tx), + inbound_queue: tx, }, - }; - (rx, cfg) + } } // Channel sizes for the supported protocols. diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 9731818a459a..ff3d42aa65ba 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -207,7 +207,7 @@ impl StatementDistributionSubsystem { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, deactivated); } - for activated in activated { + if let Some(activated) = activated { // TODO [now]: legacy, activate only if no prospective parachains support. crate::legacy_v1::handle_activated_leaf(ctx, legacy_v1_state, activated) .await?; diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs index 93d4e804f906..1a943f8dcee6 100644 --- a/node/primitives/src/disputes/message.rs +++ b/node/primitives/src/disputes/message.rs @@ -33,6 +33,9 @@ use polkadot_primitives::v2::{ /// /// And most likely has been constructed correctly. This is used with /// `DisputeDistributionMessage::SendDispute` for sending out votes. 
+/// +/// NOTE: This is sent over the wire, any changes are a change in protocol and need to be +/// versioned. #[derive(Debug, Clone)] pub struct DisputeMessage(UncheckedDisputeMessage); diff --git a/node/primitives/src/disputes/status.rs b/node/primitives/src/disputes/status.rs index c0ffb907423b..7cc26c1a1f7a 100644 --- a/node/primitives/src/disputes/status.rs +++ b/node/primitives/src/disputes/status.rs @@ -19,8 +19,12 @@ use parity_scale_codec::{Decode, Encode}; /// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. pub type Timestamp = u64; -/// The status of dispute. This is a state machine which can be altered by the -/// helper methods. +/// The status of dispute. +/// +/// As managed by the dispute coordinator. +/// +/// NOTE: This status is persisted to the database, any changes have to be versioned and a db +/// migration will be needed. #[derive(Debug, Clone, Copy, Encode, Decode, PartialEq)] pub enum DisputeStatus { /// The dispute is active and unconcluded. @@ -69,9 +73,24 @@ impl DisputeStatus { } } + /// Concluded valid? + pub fn has_concluded_for(&self) -> bool { + match self { + &DisputeStatus::ConcludedFor(_) => true, + _ => false, + } + } + /// Concluded invalid? + pub fn has_concluded_against(&self) -> bool { + match self { + &DisputeStatus::ConcludedAgainst(_) => true, + _ => false, + } + } + /// Transition the status to a new status after observing the dispute has concluded for the candidate. /// This may be a no-op if the status was already concluded. - pub fn concluded_for(self, now: Timestamp) -> DisputeStatus { + pub fn conclude_for(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedFor(now), DisputeStatus::ConcludedFor(at) => DisputeStatus::ConcludedFor(std::cmp::min(at, now)), @@ -81,7 +100,7 @@ impl DisputeStatus { /// Transition the status to a new status after observing the dispute has concluded against the candidate. /// This may be a no-op if the status was already concluded. 
- pub fn concluded_against(self, now: Timestamp) -> DisputeStatus { + pub fn conclude_against(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedAgainst(now), diff --git a/node/service/chain-specs/kusama.json b/node/service/chain-specs/kusama.json index 5d369a723dfb..581f40aff4d6 100644 --- a/node/service/chain-specs/kusama.json +++ b/node/service/chain-specs/kusama.json @@ -21,7 +21,11 @@ "/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw", "/dns/kusama-bootnode.dwellir.com/tcp/30333/ws/p2p/12D3KooWFj2ndawdYyk2spc42Y2arYwb2TUoHLHFAsKuHRzWXwoJ", "/dns/boot.stake.plus/tcp/31333/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR", - "/dns/boot.stake.plus/tcp/31334/wss/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR" + "/dns/boot.stake.plus/tcp/31334/wss/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR", + "/dns/boot-node.helikon.io/tcp/7060/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", + "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", + "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", + "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9" ], "telemetryEndpoints": [ [ diff --git a/node/service/chain-specs/polkadot.json b/node/service/chain-specs/polkadot.json index 1a5ef94987de..8fc8ac63ba32 100644 --- a/node/service/chain-specs/polkadot.json +++ b/node/service/chain-specs/polkadot.json @@ -21,7 +21,11 @@ "/dns/cc1-1.parity.tech/tcp/30333/p2p/12D3KooWFN2mhgpkJsDBuNuE5427AcDrsib8EoqGMZmkxWwx3Md4", "/dns/polkadot-bootnode.dwellir.com/tcp/30333/ws/p2p/12D3KooWKvdDyRKqUfSAaUCbYiLwKY8uK3wDWpCuy2FiDLbkPTDJ", "/dns/boot.stake.plus/tcp/30333/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n", - "/dns/boot.stake.plus/tcp/30334/wss/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n" + "/dns/boot.stake.plus/tcp/30334/wss/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n", + "/dns/boot-node.helikon.io/tcp/7070/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", + "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", + "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", + "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3" ], "telemetryEndpoints": [ [ diff --git a/node/service/chain-specs/westend.json b/node/service/chain-specs/westend.json index 03f1f5ecf6b0..059f5c39b6e2 100644 --- a/node/service/chain-specs/westend.json +++ b/node/service/chain-specs/westend.json @@ -13,7 +13,9 @@ "/dns/westend-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWNg8iUqhux7X7voNU9Nty5pzehrFJwkQwg1CJnqN3CTzE", "/dns/westend-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAq2A7UNFS6725XFatD5QW7iYBezTLdAUx1SmRkxN79Ne", "/dns/boot.stake.plus/tcp/32333/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", - "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7" + "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", + "/dns/boot-node.helikon.io/tcp/7080/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", + "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC" ], "telemetryEndpoints": [ [ diff --git 
a/node/service/src/lib.rs b/node/service/src/lib.rs index 66b3930e5baf..9a72c3c0d1b5 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -1226,22 +1226,11 @@ where } } - // Reduce grandpa load on Kusama and test networks. This will slow down finality by - // approximately one slot duration, but will reduce load. We would like to see the impact on - // Kusama, see: https://github.com/paritytech/polkadot/issues/5464 - let gossip_duration = if chain_spec.is_versi() || - chain_spec.is_wococo() || - chain_spec.is_rococo() || - chain_spec.is_kusama() - { - Duration::from_millis(2000) - } else { - Duration::from_millis(1000) - }; - let config = grandpa::Config { // FIXME substrate#1578 make this available through chainspec - gossip_duration, + // Grandpa performance can be improved a bit by tuning this parameter, see: + // https://github.com/paritytech/polkadot/issues/5464 + gossip_duration: Duration::from_millis(1000), justification_period: 512, name: Some(name), observer_enabled: false, diff --git a/node/subsystem-test-helpers/src/lib.rs b/node/subsystem-test-helpers/src/lib.rs index 79f833b7558c..dd207039091a 100644 --- a/node/subsystem-test-helpers/src/lib.rs +++ b/node/subsystem-test-helpers/src/lib.rs @@ -177,7 +177,7 @@ where /// A test subsystem context. pub struct TestSubsystemContext { tx: TestSubsystemSender, - rx: SingleItemStream>, + rx: mpsc::Receiver>, spawn: S, } @@ -239,7 +239,7 @@ pub struct TestSubsystemContextHandle { /// /// Useful for shared ownership situations (one can have multiple senders, but only one /// receiver. - pub tx: SingleItemSink>, + pub tx: mpsc::Sender>, /// Direct access to the receiver. pub rx: mpsc::UnboundedReceiver, @@ -280,11 +280,22 @@ impl TestSubsystemContextHandle { } } -/// Make a test subsystem context. +/// Make a test subsystem context with `buffer_size == 0`. This is used by most +/// of the tests. pub fn make_subsystem_context( spawner: S, ) -> (TestSubsystemContext>, TestSubsystemContextHandle) { - let (overseer_tx, overseer_rx) = single_item_sink(); + make_buffered_subsystem_context(spawner, 0) +} + +/// Make a test subsystem context with buffered overseer channel. Some tests (e.g. +/// `dispute-coordinator`) create too many parallel operations and deadlock unless +/// the channel is buffered. Usually `buffer_size=1` is enough. +pub fn make_buffered_subsystem_context( + spawner: S, + buffer_size: usize, +) -> (TestSubsystemContext>, TestSubsystemContextHandle) { + let (overseer_tx, overseer_rx) = mpsc::channel(buffer_size); let (all_messages_tx, all_messages_rx) = mpsc::unbounded(); ( diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index db45fb4b6bdc..e51cd9689c79 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -283,13 +283,13 @@ pub enum DisputeCoordinatorMessage { /// - or the imported statements are backing/approval votes, which are always accepted. pending_confirmation: Option>, }, - /// Fetch a list of all recent disputes the co-ordinator is aware of. + /// Fetch a list of all recent disputes the coordinator is aware of. /// These are disputes which have occurred any time in recent sessions, /// and which may have already concluded. RecentDisputes(oneshot::Sender>), /// Fetch a list of all active disputes that the coordinator is aware of. /// These disputes are either not yet concluded or recently concluded. 
- ActiveDisputes(oneshot::Sender>), + ActiveDisputes(oneshot::Sender>), /// Get candidate votes for a candidate. QueryCandidateVotes( Vec<(SessionIndex, CandidateHash)>, diff --git a/node/subsystem-util/src/rolling_session_window.rs b/node/subsystem-util/src/rolling_session_window.rs index beac31292b7d..4ebfad405b5b 100644 --- a/node/subsystem-util/src/rolling_session_window.rs +++ b/node/subsystem-util/src/rolling_session_window.rs @@ -294,6 +294,11 @@ impl RollingSessionWindow { self.earliest_session + (self.session_info.len() as SessionIndex).saturating_sub(1) } + /// Returns `true` if `session_index` is contained in the window. + pub fn contains(&self, session_index: SessionIndex) -> bool { + session_index >= self.earliest_session() && session_index <= self.latest_session() + } + async fn earliest_non_finalized_block_session( sender: &mut Sender, ) -> Result @@ -783,6 +788,21 @@ mod tests { cache_session_info_test(1, 2, Some(window), 2, None); } + #[test] + fn cache_session_window_contains() { + let window = RollingSessionWindow { + earliest_session: 10, + session_info: vec![dummy_session_info(1)], + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), + }; + + assert!(!window.contains(0)); + assert!(!window.contains(10 + SESSION_WINDOW_SIZE.get())); + assert!(!window.contains(11)); + assert!(!window.contains(10 + SESSION_WINDOW_SIZE.get() - 1)); + } + #[test] fn cache_session_info_first_late() { cache_session_info_test( diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index 87111241b25a..f767bb4ae975 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -48,7 +48,7 @@ sc-network = { git = "https://github.com/paritytech/substrate", branch = "master sc-network-common = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, features = [ "wasmtime" ] } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/roadmap/implementers-guide/README.md b/roadmap/implementers-guide/README.md index 7f3f8cef7e63..996041f176bb 100644 --- a/roadmap/implementers-guide/README.md +++ b/roadmap/implementers-guide/README.md @@ -22,6 +22,11 @@ Then install and build the book: ```sh cargo install mdbook mdbook-linkcheck mdbook-graphviz mdbook-mermaid mdbook-last-changed mdbook serve roadmap/implementers-guide +``` + +and in a second terminal window run: + +```sh open http://localhost:3000 ``` diff --git a/roadmap/implementers-guide/src/SUMMARY.md b/roadmap/implementers-guide/src/SUMMARY.md index bcf87aad8a49..c504b9ac1923 100644 --- a/roadmap/implementers-guide/src/SUMMARY.md +++ b/roadmap/implementers-guide/src/SUMMARY.md @@ -75,7 +75,6 @@ - [Availability](types/availability.md) - [Overseer and Subsystem Protocol](types/overseer-protocol.md) - [Runtime](types/runtime.md) - - [Chain](types/chain.md) - [Messages](types/messages.md) - [Network](types/network.md) - 
[Approvals](types/approval.md) diff --git a/roadmap/implementers-guide/src/glossary.md b/roadmap/implementers-guide/src/glossary.md index 8612d8834cb8..ed6a358be6da 100644 --- a/roadmap/implementers-guide/src/glossary.md +++ b/roadmap/implementers-guide/src/glossary.md @@ -48,4 +48,3 @@ exactly one downward message queue. Also of use is the [Substrate Glossary](https://substrate.dev/docs/en/knowledgebase/getting-started/glossary). [0]: https://wiki.polkadot.network/docs/learn-consensus -[1]: #pvf diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index 3d44e210db0e..07fc647a711c 100644 --- a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -9,7 +9,7 @@ In particular the dispute-coordinator is responsible for: - Ensuring that the node is able to raise a dispute in case an invalid candidate is found during approval checking. -- Ensuring lazy approval votes (votes given without running the parachain +- Ensuring lazy approval votes (votes given without running the parachain validation function) will be recorded, so lazy nodes can get slashed properly. - Coordinating actual participation in a dispute, ensuring that the node participates in any justified dispute in a way that ensures resolution of @@ -84,18 +84,18 @@ While there is no need to record approval votes in the dispute coordinator preemptively, we do need to make sure they are recorded when a dispute actually happens. This is because only votes recorded by the dispute coordinator will be considered for slashing. It is sufficient for our -threat model that malicious backers are slashed as opposed to both backers and -approval checkers. However, we still must import approval votes from the approvals -process into the disputes process to ensure that lazy approval checkers -actually run the parachain validation function. Slashing lazy approval checkers is necessary, else we risk a useless approvals process where every approval -checker blindly votes valid for every candidate. If we did not import approval -votes, lazy nodes would likely cast a properly checked explicit vote as part -of the dispute in addition to their blind approval vote and thus avoid a slash. -With the 2/3rd honest assumption it seems unrealistic that lazy approval voters -will keep sending unchecked approval votes once they became aware of a raised -dispute. Hence the most crucial approval votes to import are the early ones -(tranche 0), to take into account network latencies and such we still want to -import approval votes at a later point in time as well (in particular we need +threat model that malicious backers are slashed as opposed to both backers and +approval checkers. However, we still must import approval votes from the approvals +process into the disputes process to ensure that lazy approval checkers +actually run the parachain validation function. Slashing lazy approval checkers is necessary, else we risk a useless approvals process where every approval +checker blindly votes valid for every candidate. If we did not import approval +votes, lazy nodes would likely cast a properly checked explicit vote as part +of the dispute in addition to their blind approval vote and thus avoid a slash. +With the 2/3rd honest assumption it seems unrealistic that lazy approval voters +will keep sending unchecked approval votes once they became aware of a raised +dispute. 
Hence the most crucial approval votes to import are the early ones +(tranche 0), to take into account network latencies and such we still want to +import approval votes at a later point in time as well (in particular we need to make sure the dispute can conclude, but more on that later). As mentioned already previously, importing votes is most efficient when batched. @@ -202,11 +202,11 @@ time participation is faster than approval, a node would do double work. ### Ensuring Chain Import While in the previous section we discussed means for nodes to ensure relevant -votes are recorded so lazy approval checkers get slashed properly, it is crucial -to also discuss the actual chain import. Only if we guarantee that recorded votes -will also get imported on chain (on all potential chains really) we will succeed -in executing slashes. Particularly we need to make sure backing votes end up on -chain consistantly. In contrast recording and slashing lazy approval voters only +votes are recorded so lazy approval checkers get slashed properly, it is crucial +to also discuss the actual chain import. Only if we guarantee that recorded votes +will also get imported on chain (on all potential chains really) we will succeed +in executing slashes. Particularly we need to make sure backing votes end up on +chain consistantly. In contrast recording and slashing lazy approval voters only needs to be likely, not certain. Dispute distribution will make sure all explicit dispute votes get distributed @@ -227,14 +227,14 @@ production in the current set - they might only exist on an already abandoned fork. This means a block producer that just joined the set, might not have seen any of them. -For approvals it is even more tricky and less necessary: Approval voting together -with finalization is a completely off-chain process therefore those protocols -don't care about block production at all. Approval votes only have a guarantee of -being propagated between the nodes that are responsible for finalizing the -concerned blocks. This implies that on an era change the current authority set, -will not necessarily get informed about any approval votes for the previous era. -Hence even if all validators of the previous era successfully recorded all approval -votes in the dispute coordinator, they won't get a chance to put them on chain, +For approvals it is even more tricky and less necessary: Approval voting together +with finalization is a completely off-chain process therefore those protocols +don't care about block production at all. Approval votes only have a guarantee of +being propagated between the nodes that are responsible for finalizing the +concerned blocks. This implies that on an era change the current authority set, +will not necessarily get informed about any approval votes for the previous era. +Hence even if all validators of the previous era successfully recorded all approval +votes in the dispute coordinator, they won't get a chance to put them on chain, hence they won't be considered for slashing. It is important to note, that the essential properties of the system still hold: @@ -359,14 +359,19 @@ times instead of just once to the oldest offender. This is obviously a good idea, in particular it makes it impossible for an attacker to prevent rolling back a very old candidate, by keeping raising disputes for newer candidates. 
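Ordering participation so that candidates with older relay parents are handled first boils down to keying the queue by the relay parent's block number. The sketch below is purely illustrative and uses stubbed types rather than the dispute coordinator's actual queue implementation:

```rust
use std::collections::BTreeMap;

// Illustrative sketch: participation requests keyed by the relay parent's block
// number, so iteration naturally yields the oldest block first.
type BlockNumber = u32;
type CandidateHash = [u8; 32];

#[derive(Default)]
struct ParticipationQueue {
    // BTreeMap iterates keys in ascending order, so the lowest block number comes first.
    queue: BTreeMap<(BlockNumber, CandidateHash), ()>,
}

impl ParticipationQueue {
    fn queue(&mut self, relay_parent_number: BlockNumber, candidate: CandidateHash) {
        self.queue.insert((relay_parent_number, candidate), ());
    }

    /// Take the request with the oldest relay parent block number.
    fn dequeue(&mut self) -> Option<(BlockNumber, CandidateHash)> {
        let key = *self.queue.keys().next()?;
        self.queue.remove(&key);
        Some(key)
    }
}

fn main() {
    let mut q = ParticipationQueue::default();
    q.queue(20, [1; 32]);
    q.queue(10, [2; 32]);
    assert_eq!(q.dequeue().map(|(n, _)| n), Some(10)); // oldest block first
}
```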
-For candidates we have not seen included, but we have our availability piece -available we put participation on a best-effort queue, which at the moment is -processed on the basis how often we requested participation locally, which -equals the number of times we imported votes for that dispute. The idea is, if -we have not seen the candidate included, but the dispute is valid, other nodes -will have seen it included - so the more votes there are, the more likely it is -a valid dispute and we should implicitly arrive at a similar ordering as the -nodes that are able to sort based on the relay parent block height. +For candidates we have not seen included, but we know are backed (thanks to chain +scraping) or we have seen a dispute with 1/3+1 participation (confirmed dispute) +on them - we put participation on a best-effort queue. It has got the same +ordering as the priority one - by block heights of the relay parent, older blocks +are with priority. There is a possibility not to be able to obtain the block number +of the parent when we are inserting the dispute in the queue. The reason for this +is either the dispute is completely made up or we are out of sync with the other +nodes in terms of last finalized block. The former is very unlikely. If we are +adding a dispute in best-effort it should already be either confirmed or the +candidate is backed. In the latter case we will promote the dispute to the +priority queue once we learn about the new block. NOTE: this is still work in +progress and is tracked by [this issue] +(https://github.com/paritytech/polkadot/issues/5875). #### Import @@ -381,6 +386,12 @@ dispute coordinator level (dispute-distribution also has its own), which is spam slots. For each import, where we don't know whether it might be spam or not we increment a counter for each signing participant of explicit `invalid` votes. +What votes do we treat as a potential spam? A vote will increase a spam slot if +and only if all of the following condidions are satisfied: +* the candidate under dispute is not included on any chain +* the dispute is not confirmed +* we haven't casted a vote for the dispute + The reason this works is because we only need to worry about actual dispute votes. Import of backing votes are already rate limited and concern only real candidates for approval votes a similar argument holds (if they come from diff --git a/roadmap/implementers-guide/src/node/utility/candidate-validation.md b/roadmap/implementers-guide/src/node/utility/candidate-validation.md index 5393368c5c6b..4e67be069155 100644 --- a/roadmap/implementers-guide/src/node/utility/candidate-validation.md +++ b/roadmap/implementers-guide/src/node/utility/candidate-validation.md @@ -48,4 +48,39 @@ Once we have all parameters, we can spin up a background task to perform the val If we can assume the presence of the relay-chain state (that is, during processing [`CandidateValidationMessage`][CVM]`::ValidateFromChainState`) we can run all the checks that the relay-chain would run at the inclusion time thus confirming that the candidate will be accepted. +### PVF Host + +The PVF host is responsible for handling requests to prepare and execute PVF +code blobs. + +One high-level goal is to make PVF operations as deterministic as possible, to +reduce the rate of disputes. Disputes can happen due to e.g. a job timing out on +one machine, but not another. While we do not yet have full determinism, there +are some dispute reduction mechanisms in place right now. 
+ +#### Retrying execution requests + +If the execution request fails during **preparation**, we will retry if it is +possible that the preparation error was transient (e.g. if the error was a panic +or time out). We will only retry preparation if another request comes in after +15 minutes, to ensure any potential transient conditions had time to be +resolved. We will retry up to 5 times. + +If the actual **execution** of the artifact fails, we will retry once if it was +an ambiguous error after a brief delay, to allow any potential transient +conditions to clear. + +#### Preparation timeouts + +We use timeouts for both preparation and execution jobs to limit the amount of +time they can take. As the time for a job can vary depending on the machine and +load on the machine, this can potentially lead to disputes where some validators +successfuly execute a PVF and others don't. + +One mitigation we have in place is a more lenient timeout for preparation during +execution than during pre-checking. The rationale is that the PVF has already +passed pre-checking, so we know it should be valid, and we allow it to take +longer than expected, as this is likely due to an issue with the machine and not +the PVF. + [CVM]: ../../types/overseer-protocol.md#validationrequesttype diff --git a/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md b/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md index b0f58346da99..fd75ce9e3804 100644 --- a/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md +++ b/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md @@ -12,11 +12,11 @@ This subsytem does not produce any output messages either. The subsystem will, h If the node is running in a collator mode, this subsystem will be disabled. The PVF pre-checker subsystem keeps track of the PVFs that are relevant for the subsystem. -To be relevant for the subsystem, a PVF must be returned by `pvfs_require_precheck` [`pvfs_require_precheck` runtime API][PVF pre-checking runtime API] in any of the active leaves. If the PVF is not present in any of the active leaves, it ceases to be relevant. +To be relevant for the subsystem, a PVF must be returned by the [`pvfs_require_precheck` runtime API][PVF pre-checking runtime API] in any of the active leaves. If the PVF is not present in any of the active leaves, it ceases to be relevant. When a PVF just becomes relevant, the subsystem will send a message to the [Candidate Validation] subsystem asking for the pre-check. -Upon receving a message from the candidate-validation subsystem, the pre-checker will note down that the PVF has its judgement and will also sign and submit a [`PvfCheckStatement`] via the [`submit_pvf_check_statement` runtime API][PVF pre-checking runtime API]. In case, a judgement was received for a PVF that is no longer in view it is ignored. It is possible that the candidate validation was not able to check the PVF. In that case, the PVF pre-checker will abstain and won't submit any check statements. +Upon receving a message from the candidate-validation subsystem, the pre-checker will note down that the PVF has its judgement and will also sign and submit a [`PvfCheckStatement`][PvfCheckStatement] via the [`submit_pvf_check_statement` runtime API][PVF pre-checking runtime API]. In case, a judgement was received for a PVF that is no longer in view it is ignored. It is possible that the candidate validation was not able to check the PVF. In that case, the PVF pre-checker will abstain and won't submit any check statements. 
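The relevance rule stated above (a PVF matters only while at least one active leaf reports it via `pvfs_require_precheck`) can be sketched as a small view-tracking structure. The types below are stand-ins for illustration, not the subsystem's real state:

```rust
use std::collections::{HashMap, HashSet};

// Illustrative only: a PVF (identified here by a stubbed hash) stays relevant
// exactly while at least one active leaf still requires its pre-check.
type Hash = [u8; 32];
type ValidationCodeHash = [u8; 32];

#[derive(Default)]
struct View {
    per_leaf: HashMap<Hash, HashSet<ValidationCodeHash>>,
}

impl View {
    fn leaf_activated(&mut self, leaf: Hash, pvfs: HashSet<ValidationCodeHash>) {
        self.per_leaf.insert(leaf, pvfs);
    }

    fn leaf_deactivated(&mut self, leaf: &Hash) {
        self.per_leaf.remove(leaf);
    }

    /// A PVF is relevant while any active leaf still reports it.
    fn is_relevant(&self, pvf: &ValidationCodeHash) -> bool {
        self.per_leaf.values().any(|set| set.contains(pvf))
    }
}

fn main() {
    let mut view = View::default();
    let leaf = [1u8; 32];
    let pvf = [9u8; 32];
    view.leaf_activated(leaf, HashSet::from([pvf]));
    assert!(view.is_relevant(&pvf));
    view.leaf_deactivated(&leaf);
    assert!(!view.is_relevant(&pvf)); // no active leaf requires it any more
}
```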
Since a vote only is valid during [one session][overview], the subsystem will have to resign and submit the statements for for the new session. The new session is assumed to be started if at least one of the leaves has a greater session index that was previously observed in any of the leaves. @@ -28,4 +28,4 @@ If the node is not in the active validator set, it will still perform all the ch [Runtime API]: runtime-api.md [PVF pre-checking runtime API]: ../../runtime-api/pvf-prechecking.md [Candidate Validation]: candidate-validation.md -[`PvfCheckStatement`]: ../../types/pvf-prechecking.md +[PvfCheckStatement]: ../../types/pvf-prechecking.md#pvfcheckstatement diff --git a/roadmap/implementers-guide/src/types/candidate.md b/roadmap/implementers-guide/src/types/candidate.md index b9d5900b7f17..baad5b07e6cd 100644 --- a/roadmap/implementers-guide/src/types/candidate.md +++ b/roadmap/implementers-guide/src/types/candidate.md @@ -92,6 +92,22 @@ struct CandidateDescriptor { } ``` +## `ValidationParams` + +```rust +/// Validation parameters for evaluating the parachain validity function. +pub struct ValidationParams { + /// Previous head-data. + pub parent_head: HeadData, + /// The collation body. + pub block_data: BlockData, + /// The current relay-chain block number. + pub relay_parent_number: RelayChainBlockNumber, + /// The relay-chain block's storage root. + pub relay_parent_storage_root: Hash, +} +``` + ## `PersistedValidationData` The validation data provides information about how to create the inputs for validation of a candidate. This information is derived from the chain state and will vary from para to para, although some of the fields may be the same for every para. diff --git a/roadmap/implementers-guide/src/types/chain.md b/roadmap/implementers-guide/src/types/chain.md deleted file mode 100644 index e8ec6cea8f4a..000000000000 --- a/roadmap/implementers-guide/src/types/chain.md +++ /dev/null @@ -1,30 +0,0 @@ -# Chain - -Types pertaining to the relay-chain - events, structures, etc. - -## Block Import Event - -```rust -/// Indicates that a new block has been added to the blockchain. -struct BlockImportEvent { - /// The block header-hash. - hash: Hash, - /// The header itself. - header: Header, - /// Whether this block is considered the head of the best chain according to the - /// event emitter's fork-choice rule. - new_best: bool, -} -``` - -## Block Finalization Event - -```rust -/// Indicates that a new block has been finalized. -struct BlockFinalizationEvent { - /// The block header-hash. - hash: Hash, - /// The header of the finalized block. - header: Header, -} -``` diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index 06635bd15127..3422dff1616a 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -681,9 +681,7 @@ enum ProvisionerMessage { The Runtime API subsystem is responsible for providing an interface to the state of the chain's runtime. -This is fueled by an auxiliary type encapsulating all request types defined in the Runtime API section of the guide. - -> To do: link to the Runtime API section. Not possible currently because of https://github.com/Michael-F-Bryan/mdbook-linkcheck/issues/25. Once v0.7.1 is released it will work. +This is fueled by an auxiliary type encapsulating all request types defined in the [Runtime API section](../runtime-api) of the guide. 
```rust enum RuntimeApiRequest { diff --git a/roadmap/implementers-guide/src/types/pvf-prechecking.md b/roadmap/implementers-guide/src/types/pvf-prechecking.md index 331429bd1fc5..f68f1e60feee 100644 --- a/roadmap/implementers-guide/src/types/pvf-prechecking.md +++ b/roadmap/implementers-guide/src/types/pvf-prechecking.md @@ -1,5 +1,7 @@ # PVF Pre-checking types +## `PvfCheckStatement` + > ⚠️ This type was added in v2. One of the main units of information on which PVF pre-checking voting is build is the `PvfCheckStatement`. diff --git a/runtime/common/src/slots/mod.rs b/runtime/common/src/slots/mod.rs index 673d9f86e1bd..21391dc3d774 100644 --- a/runtime/common/src/slots/mod.rs +++ b/runtime/common/src/slots/mod.rs @@ -1039,8 +1039,8 @@ mod benchmarking { // Worst case scenario, T parathreads onboard, and C parachains offboard. manage_lease_period_start { // Assume reasonable maximum of 100 paras at any time - let c in 1 .. 100; - let t in 1 .. 100; + let c in 0 .. 100; + let t in 0 .. 100; let period_begin = 1u32.into(); let period_count = 4u32.into(); diff --git a/scripts/ci/gitlab/lingua.dic b/scripts/ci/gitlab/lingua.dic index adc85edcefb0..6d78e484265a 100644 --- a/scripts/ci/gitlab/lingua.dic +++ b/scripts/ci/gitlab/lingua.dic @@ -209,6 +209,7 @@ preconfigured preimage/MS preopen prepend/G +prevalidating prevalidation preverify/G programmatically diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index c6fe5916c3b6..1553456a9e7b 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -22,7 +22,6 @@ build-linux-stable: RUN_UI_TESTS: 1 script: - time cargo build --profile testnet --features pyroscope --verbose --bin polkadot - - sccache -s # pack artifacts - mkdir -p ./artifacts - VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name @@ -98,7 +97,6 @@ build-malus: - .collect-artifacts script: - time cargo build --profile testnet --verbose -p polkadot-test-malus - - sccache -s # pack artifacts - mkdir -p ./artifacts - mv ./target/testnet/malus ./artifacts/. @@ -165,17 +163,18 @@ build-implementers-guide: - job: test-deterministic-wasm artifacts: false extends: - - .docker-env + - .kubernetes-env - .test-refs - .collect-artifacts-short + # git depth is set on purpose: https://github.com/paritytech/polkadot/issues/6284 + variables: + GIT_STRATEGY: clone + GIT_DEPTH: 0 + CI_IMAGE: paritytech/mdbook-utils:e14aae4a-20221123 script: - - apt-get -y update; apt-get install -y graphviz - - cargo install mdbook mdbook-mermaid mdbook-linkcheck mdbook-graphviz mdbook-last-changed - mdbook build ./roadmap/implementers-guide - mkdir -p artifacts - mv roadmap/implementers-guide/book artifacts/ - # FIXME: remove me after CI image gets nonroot - - chown -R nonroot:nonroot artifacts/ build-short-benchmark: stage: build diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml index 12e087e188d1..3484fcae336e 100644 --- a/scripts/ci/gitlab/pipeline/publish.yml +++ b/scripts/ci/gitlab/pipeline/publish.yml @@ -7,6 +7,7 @@ publish-polkadot-debug-image: stage: publish extends: + - .kubernetes-env - .build-push-image rules: # Don't run when triggered from another pipeline @@ -18,6 +19,7 @@ publish-polkadot-debug-image: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 variables: + CI_IMAGE: ${BUILDAH_IMAGE} GIT_STRATEGY: none DOCKER_USER: ${PARITYPR_USER} DOCKER_PASS: ${PARITYPR_PASS} @@ -42,9 +44,11 @@ publish-test-collators-image: # service image for Simnet stage: publish extends: + - .kubernetes-env - .build-push-image - .zombienet-refs variables: + CI_IMAGE: ${BUILDAH_IMAGE} GIT_STRATEGY: none DOCKER_USER: ${PARITYPR_USER} DOCKER_PASS: ${PARITYPR_PASS} @@ -68,9 +72,11 @@ publish-malus-image: # service image for Simnet stage: publish extends: + - .kubernetes-env - .build-push-image - .zombienet-refs variables: + CI_IMAGE: ${BUILDAH_IMAGE} GIT_STRATEGY: none DOCKER_USER: ${PARITYPR_USER} DOCKER_PASS: ${PARITYPR_PASS} @@ -93,9 +99,11 @@ publish-malus-image: publish-staking-miner-image: stage: publish extends: + - .kubernetes-env - .build-push-image - .publish-refs variables: + CI_IMAGE: ${BUILDAH_IMAGE} # scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile DOCKERFILE: ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile IMAGE_NAME: docker.io/paritytech/staking-miner @@ -114,8 +122,8 @@ publish-s3-release: needs: - job: build-linux-stable artifacts: true - image: paritytech/awscli:latest variables: + CI_IMAGE: paritytech/awscli:latest GIT_STRATEGY: none PREFIX: "builds/polkadot/${ARCH}-${DOCKER_OS}" rules: @@ -152,9 +160,8 @@ publish-rustdoc: stage: publish extends: - .kubernetes-env - image: paritytech/tools:latest variables: - GIT_DEPTH: 100 + CI_IMAGE: paritytech/tools:latest rules: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never diff --git a/xcm/src/v0/multi_location.rs b/xcm/src/v0/multi_location.rs index 0491e27d644f..1bf49ad841a6 100644 --- a/xcm/src/v0/multi_location.rs +++ b/xcm/src/v0/multi_location.rs @@ -356,17 +356,30 @@ impl MultiLocation { /// # } /// ``` pub fn match_and_split(&self, prefix: &MultiLocation) -> Option<&Junction> { - if prefix.len() + 1 != self.len() { + if prefix.len() + 1 != self.len() || !self.starts_with(prefix) { return None } - for i in 0..prefix.len() { - if prefix.at(i) != self.at(i) { - return None - } - } return self.at(prefix.len()) } + /// Returns whether `self` begins with or is equal to `prefix`. + /// + /// # Example + /// ```rust + /// # use xcm::v0::{Junction::*, MultiLocation::*}; + /// let m = X4(Parent, PalletInstance(3), OnlyChild, OnlyChild); + /// assert!(m.starts_with(&X2(Parent, PalletInstance(3)))); + /// assert!(m.starts_with(&m)); + /// assert!(!m.starts_with(&X2(Parent, GeneralIndex(99)))); + /// assert!(!m.starts_with(&X1(PalletInstance(3)))); + /// ``` + pub fn starts_with(&self, prefix: &MultiLocation) -> bool { + if self.len() < prefix.len() { + return false + } + prefix.iter().zip(self.iter()).all(|(l, r)| l == r) + } + /// Mutates `self`, suffixing it with `new`. Returns `Err` in case of overflow. 
pub fn push(&mut self, new: Junction) -> result::Result<(), ()> { let mut n = MultiLocation::Null; @@ -601,6 +614,24 @@ mod tests { assert_eq!(m.match_and_split(&m), None); } + #[test] + fn starts_with_works() { + let full = X3(Parent, Parachain(1000), AccountIndex64 { network: Any, index: 23 }); + let identity = full.clone(); + let prefix = X2(Parent, Parachain(1000)); + let wrong_parachain = X2(Parent, Parachain(1001)); + let wrong_account = X3(Parent, Parachain(1000), AccountIndex64 { network: Any, index: 24 }); + let no_parents = X1(Parachain(1000)); + let too_many_parents = X3(Parent, Parent, Parachain(1000)); + + assert!(full.starts_with(&identity)); + assert!(full.starts_with(&prefix)); + assert!(!full.starts_with(&wrong_parachain)); + assert!(!full.starts_with(&wrong_account)); + assert!(!full.starts_with(&no_parents)); + assert!(!full.starts_with(&too_many_parents)); + } + #[test] fn append_with_works() { let acc = AccountIndex64 { network: Any, index: 23 }; diff --git a/xcm/src/v1/multilocation.rs b/xcm/src/v1/multilocation.rs index 0c2b2da31698..83cf0095c3b8 100644 --- a/xcm/src/v1/multilocation.rs +++ b/xcm/src/v1/multilocation.rs @@ -253,6 +253,24 @@ impl MultiLocation { self.interior.match_and_split(&prefix.interior) } + /// Returns whether `self` has the same number of parents as `prefix` and its junctions begin + /// with the junctions of `prefix`. + /// + /// # Example + /// ```rust + /// # use xcm::v1::{Junctions::*, Junction::*, MultiLocation}; + /// let m = MultiLocation::new(1, X3(PalletInstance(3), OnlyChild, OnlyChild)); + /// assert!(m.starts_with(&MultiLocation::new(1, X1(PalletInstance(3))))); + /// assert!(!m.starts_with(&MultiLocation::new(1, X1(GeneralIndex(99))))); + /// assert!(!m.starts_with(&MultiLocation::new(0, X1(PalletInstance(3))))); + /// ``` + pub fn starts_with(&self, prefix: &MultiLocation) -> bool { + if self.parents != prefix.parents { + return false + } + self.interior.starts_with(&prefix.interior) + } + /// Mutate `self` so that it is suffixed with `suffix`. /// /// Does not modify `self` and returns `Err` with `suffix` in case of overflow. @@ -801,15 +819,29 @@ impl Junctions { /// # } /// ``` pub fn match_and_split(&self, prefix: &Junctions) -> Option<&Junction> { - if prefix.len() + 1 != self.len() { + if prefix.len() + 1 != self.len() || !self.starts_with(prefix) { return None } - for i in 0..prefix.len() { - if prefix.at(i) != self.at(i) { - return None - } + self.at(prefix.len()) + } + + /// Returns whether `self` begins with or is equal to `prefix`.
+ /// + /// # Example + /// ```rust + /// # use xcm::v1::{Junctions::*, Junction::*}; + /// let mut j = X3(Parachain(2), PalletInstance(3), OnlyChild); + /// assert!(j.starts_with(&X2(Parachain(2), PalletInstance(3)))); + /// assert!(j.starts_with(&j)); + /// assert!(j.starts_with(&X1(Parachain(2)))); + /// assert!(!j.starts_with(&X1(Parachain(999)))); + /// assert!(!j.starts_with(&X4(Parachain(2), PalletInstance(3), OnlyChild, OnlyChild))); + /// ``` + pub fn starts_with(&self, prefix: &Junctions) -> bool { + if self.len() < prefix.len() { + return false } - return self.at(prefix.len()) + prefix.iter().zip(self.iter()).all(|(l, r)| l == r) } } @@ -929,6 +961,26 @@ mod tests { assert_eq!(m.match_and_split(&m), None); } + #[test] + fn starts_with_works() { + let full: MultiLocation = + (Parent, Parachain(1000), AccountId32 { network: Any, id: [0; 32] }).into(); + let identity: MultiLocation = full.clone(); + let prefix: MultiLocation = (Parent, Parachain(1000)).into(); + let wrong_parachain: MultiLocation = (Parent, Parachain(1001)).into(); + let wrong_account: MultiLocation = + (Parent, Parachain(1000), AccountId32 { network: Any, id: [1; 32] }).into(); + let no_parents: MultiLocation = (Parachain(1000)).into(); + let too_many_parents: MultiLocation = (Parent, Parent, Parachain(1000)).into(); + + assert!(full.starts_with(&identity)); + assert!(full.starts_with(&prefix)); + assert!(!full.starts_with(&wrong_parachain)); + assert!(!full.starts_with(&wrong_account)); + assert!(!full.starts_with(&no_parents)); + assert!(!full.starts_with(&too_many_parents)); + } + #[test] fn append_with_works() { let acc = AccountIndex64 { network: Any, index: 23 };